testthat/0000755000176200001440000000000015130664352012117 5ustar liggesuserstestthat/tests/0000755000176200001440000000000015130237623013256 5ustar liggesuserstestthat/tests/test-catch.R0000644000176200001440000000321715047715224015450 0ustar liggesuserslibrary(testthat) local({ if (!requireNamespace("usethis", quietly = TRUE)) { return() } quietly <- function(expr) { suppressMessages(capture_output(result <- expr)) result } perform_test <- function(pkgName, catchEnabled) { owd <- setwd(tempdir()) withr::defer(setwd(owd)) pkgPath <- file.path(tempdir(), pkgName) libPath <- file.path(tempdir(), "rlib") if (!utils::file_test("-d", libPath)) { dir.create(libPath) } .libPaths(c(libPath, .libPaths())) withr::defer({ unlink(pkgPath, recursive = TRUE) unlink(libPath, recursive = TRUE) }) quietly(usethis::create_package(pkgPath, open = FALSE)) quietly(testthat::use_catch(pkgPath)) cat( "LinkingTo: testthat", file = file.path(pkgPath, "DESCRIPTION"), append = TRUE, sep = "\n" ) cat( sprintf("useDynLib(%s, .registration=TRUE)", pkgName), file = file.path(pkgPath, "NAMESPACE"), append = TRUE, sep = "\n" ) if (!catchEnabled) { isWindows <- Sys.info()[["sysname"]] == "Windows" makevarsPath <- file.path( pkgPath, "src", if (isWindows) "Makevars.win" else "Makevars" ) cat( "PKG_CPPFLAGS = -DTESTTHAT_DISABLED", file = makevarsPath, sep = "\n" ) } install.packages(pkgs = pkgPath, repos = NULL, type = "source") library(pkgName, character.only = TRUE) stopifnot(.Call("run_testthat_tests", FALSE, PACKAGE = pkgName)) pkgload::unload(pkgName) } withr::local_envvar(R_TESTS = '') perform_test("testthatclient1", TRUE) perform_test("testthatclient2", FALSE) }) testthat/tests/testthat/0000755000176200001440000000000015130664352015121 5ustar liggesuserstestthat/tests/testthat/test-reporter-silent.R0000644000176200001440000000030515044126231021346 0ustar liggesuserstest_that("captures expectations; doesn't produce any output", { reporter <- SilentReporter$new() 
expect_snapshot_reporter(reporter) expect_snapshot_value(length(reporter$expectations())) }) testthat/tests/testthat/test-mock-oo.R0000644000176200001440000000641715127561732017601 0ustar liggesusers# S3 -------------------------------------------------------------------------- test_that("can mock S3 methods", { x <- as.POSIXlt(Sys.time()) local({ local_mocked_s3_method("length", "POSIXlt", function(x) 42) expect_length(x, 42) }) expect_length(x, 1) }) test_that("validates its inputs", { expect_snapshot(error = TRUE, { local_mocked_s3_method(1) local_mocked_s3_method("mean", 1) local_mocked_s3_method("mean", "bar", 1) local_mocked_s3_method("notAGeneric", "bar", function() {}) }) }) test_that("can mock S3 method that doesn't exist yet", { x <- structure(list(), class = "test_mock_class") local({ local_mocked_s3_method("length", "test_mock_class", function(x) 42) expect_length(x, 42) }) # Method should be removed after scope ends expect_length(x, 0) }) test_that("can temporarily remove S3 method with NULL", { x <- structure(list(), class = "test_mock_class2") local_mocked_s3_method("length", "test_mock_class2", function(x) 42) local({ # Now remove it local_mocked_s3_method("length", "test_mock_class2", NULL) expect_length(x, 0) }) # Method should be restored after scope ends expect_length(x, 42) }) # S4 -------------------------------------------------------------------------- test_that("can mock S4 methods", { jim <- TestMockPerson(name = "Jim", age = 32) local({ local_mocked_s4_method("mock_age", "TestMockPerson", function(x) 42) expect_equal(mock_age(jim), 42) }) expect_equal(mock_age(jim), 32) }) test_that("validates its inputs", { expect_snapshot(error = TRUE, { local_mocked_s4_method(1) local_mocked_s4_method("mean", 1) local_mocked_s4_method("mean", "bar", 1) local_mocked_s4_method("notAGeneric", "bar", function() {}) }) }) test_that("can mock S4 method that doesn't exist yet", { jim <- TestMockPerson(name = "Jim", age = 32) local({ 
local_mocked_s4_method("show", "TestMockPerson", function(object) { cat("Person:", object@name, "\n") }) expect_output(show(jim), "Person: Jim") }) expect_null(methods::getMethod("show", "TestMockPerson", optional = TRUE)) }) test_that("can temporarily remove S4 method with NULL", { jim <- TestMockPerson(name = "Jim", age = 32) expect_equal(mock_age(jim), 32) local({ local_mocked_s4_method("mock_age", "TestMockPerson", NULL) # Method is removed, so this should error expect_error(mock_age(jim), "unable to find") }) expect_equal(mock_age(jim), 32) }) # R6 -------------------------------------------------------------------------- test_that("can mock R6 methods", { local({ local_mocked_r6_class(TestMockClass, public = list(sum = function() 2)) obj <- TestMockClass$new() expect_equal(obj$sum(), 2) }) obj <- TestMockClass$new() expect_equal(obj$sum(), 4321) }) test_that("can mock all R6 components", { local_mocked_r6_class( TestMockClass, public = list(public_fun = function() 0, public_val = 0), private = list(private_fun = function() 0, private_val = 0) ) obj <- TestMockClass$new() expect_equal(obj$sum(), 0) }) test_that("validates its inputs", { expect_snapshot(error = TRUE, { local_mocked_r6_class(mean) local_mocked_r6_class(TestMockClass, public = 1) local_mocked_r6_class(TestMockClass, private = 1) }) }) testthat/tests/testthat/test-snapshot-file.R0000644000176200001440000001275515111021366020776 0ustar liggesuserstest_that("expect_snapshot_file works", { path <- write_tmp_lines(letters) expect_snapshot_file(path, "foo.r", compare = compare_file_text) path <- withr::local_tempfile() png(path, width = 300, height = 300, type = "cairo") plot(1:10, xlab = "", ylab = "", pch = 20, cex = 5, axes = FALSE) dev.off() expect_snapshot_file(path, "foo.png") path <- withr::local_tempfile() mtcars2 <- mtcars[1:5, 1:4] write.csv(mtcars2, path) expect_snapshot_file(path, "foo.csv", compare = compare_file_text) # Deprecated `binary` argument still works 
withr::local_options(lifecycle_verbosity = "quiet") expect_snapshot_file(path, "foo-not-binary.csv", binary = FALSE) }) test_that("expect_snapshot_file works in a different directory", { path <- withr::local_tempdir() withr::local_dir(path) brio::write_lines("a", "a.txt", eol = "\r\n") # expect no warning expect_no_warning(expect_snapshot_file("a.txt")) }) test_that("expect_snapshot_file works with variant", { local_on_cran(FALSE) expect_snapshot_file( write_tmp_lines(r_version()), "version.txt", compare = compare_file_text, variant = r_version() ) }) test_that("expect_snapshot_file finds duplicate snapshot files", { local_on_cran(FALSE) expect_snapshot( expect_snapshot_file( write_tmp_lines(r_version()), "version.txt", variant = r_version() ), error = TRUE ) }) test_that("basic workflow", { local_on_cran(FALSE) snapper <- local_test_snapshotter() path <- write_tmp_lines(letters) # warns on first run snapper$start_file("snapshot-6", "test") expect_warning(expect_snapshot_file(path, "letters.txt"), "Adding new") snapper$end_file() # succeeds if unchanged snapper$start_file("snapshot-6", "test") expect_success(expect_snapshot_file(path, "letters.txt")) snapper$end_file() # fails if changed snapper$start_file("snapshot-6", "test") path2 <- write_tmp_lines(letters[-1]) expect_failure(expect_snapshot_file(path2, "letters.txt"), "has changed") snapper$end_file() }) test_that("can announce snapshot file", { snapper <- local_test_snapshotter() snapper$start_file("snapshot-announce", "test") announce_snapshot_file(name = "bar.svg") expect_equal(snapper$snap_file_seen, "snapshot-announce/bar.svg") }) test_that("can transform snapshot contents", { path <- withr::local_tempfile(lines = c("secret", "ssh secret squirrel")) redact <- function(x) gsub("secret", "", x) expect_snapshot_file(path, "secret.txt", transform = redact) }) # snapshot_file_equal ----------------------------------------------------- test_that("warns on first creation", { path <- write_tmp_lines("a") snap_dir 
<- withr::local_tempdir() snapshot_file_equal_ <- function(path) { snapshot_file_equal( snap_dir = snap_dir, snap_test = "my-test", snap_name = "test.txt", snap_variant = NULL, path = path, fail_on_new = FALSE ) } # Warns on first run expect_snapshot(out <- snapshot_file_equal_(path)) expect_true(out) # Errors on non-existing file expect_snapshot(snapshot_file_equal_("doesnt-exist.txt"), error = TRUE) # Unchanged returns TRUE expect_true(snapshot_file_equal_(path)) expect_true(file.exists(file.path(snap_dir, "my-test/test.txt"))) expect_false(file.exists(file.path(snap_dir, "my-test/test.new.txt"))) # Changed returns FALSE path2 <- write_tmp_lines("b") expect_false(snapshot_file_equal_(path2)) expect_true(file.exists(file.path(snap_dir, "my-test/test.txt"))) expect_true(file.exists(file.path(snap_dir, "my-test/test.new.txt"))) # Changing again overwrites path2 <- write_tmp_lines("c") expect_false(snapshot_file_equal_(path2)) expect_equal( brio::read_lines(file.path(snap_dir, "my-test/test.new.txt")), "c" ) # Unchanged cleans up expect_true(snapshot_file_equal_(path)) expect_true(file.exists(file.path(snap_dir, "my-test/test.txt"))) expect_false(file.exists(file.path(snap_dir, "my-test/test.new.txt"))) }) # helpers ----------------------------------------------------------------- test_that("text comparison ignores CR", { path1 <- write_tmp_lines(c("a", "b")) path2 <- write_tmp_lines(c("a", "b"), eol = "\r\n") expect_false(compare_file_binary(path1, path2)) expect_true(compare_file_text(path1, path2)) }) test_that("split_path handles edge cases", { expect_equal(split_path(""), list(dir = "", name = "", ext = "")) expect_equal(split_path("a"), list(dir = "", name = "a", ext = "")) expect_equal(split_path("a"), list(dir = "", name = "a", ext = "")) expect_equal(split_path(".b"), list(dir = "", name = "", ext = "b")) expect_equal(split_path(".b.c"), list(dir = "", name = "", ext = "b.c")) expect_equal(split_path("x/a"), list(dir = "x", name = "a", ext = "")) 
expect_equal(split_path("x/a"), list(dir = "x", name = "a", ext = "")) expect_equal(split_path("x/.b"), list(dir = "x", name = "", ext = "b")) expect_equal(split_path("x/.b.c"), list(dir = "x", name = "", ext = "b.c")) }) test_that("generates informative hint", { local_mocked_bindings(in_check_reporter = function() FALSE) withr::local_envvar(GITHUB_ACTIONS = "false", TESTTHAT_WD = NA) expect_snapshot(snapshot_hint("lala", reset_output = FALSE)) }) test_that("expect_snapshot_file validates its inputs", { path <- withr::local_tempfile(lines = "x") expect_snapshot(error = TRUE, { expect_snapshot_file(123) expect_snapshot_file("doesnt-exist.txt") expect_snapshot_file(path, 123) expect_snapshot_file(path, "test.txt", cran = "yes") }) }) testthat/tests/testthat/test-test-example.R0000644000176200001440000000064115054053615020631 0ustar liggesuserstest_that("can test documentation from path or Rd object", { rd_path <- test_path("../../man/expect_length.Rd") skip_if_not(file.exists(rd_path)) capture_output({ test_example(rd_path) test_rd(tools::parse_Rd(rd_path)) }) }) test_that("returns false if no examples", { rd_path <- test_path("../../man/test_examples.Rd") skip_if_not(file.exists(rd_path)) expect_false(test_example(rd_path)) }) testthat/tests/testthat/test-compare.R0000644000176200001440000001431415075703550017654 0ustar liggesuserstest_that("list comparison truncates to max_diffs", { x <- as.list(as.character(1:1e3)) y <- lapply(x, paste0, ".") lines1 <- strsplit(compare(x, y)$message, "\n")[[1]] expect_length(lines1, 10) lines2 <- strsplit(compare(x, y, max_diffs = 99)$message, "\n")[[1]] expect_length(lines2, 100) }) test_that("no diff", { expect_equal(compare(1, 1), no_difference()) }) test_that("vector_equal_tol handles infinity", { expect_true(vector_equal_tol(Inf, Inf)) expect_true(vector_equal_tol(-Inf, -Inf)) expect_false(vector_equal_tol(Inf, -Inf)) expect_false(vector_equal_tol(Inf, 0)) }) test_that("vector_equal_tol handles na", { 
expect_true(vector_equal_tol(NA, NA)) expect_false(vector_equal_tol(NA, 0)) }) # character --------------------------------------------------------------- test_that("types must be the same", { expect_match(compare("a", 1L)$message, "character is not integer") }) test_that("base lengths must be identical", { expect_match(compare("a", letters)$message, "1 is not 26") }) test_that("classes must be identical", { c1 <- "a" c2 <- structure("a", class = "mycharacter") expect_match(compare(c1, c2)$message, "\"character\" is not \"mycharacter\"") }) test_that("attributes must be identical", { x1 <- "a" x2 <- c(a = "a") x3 <- c(b = "a") x4 <- structure("a", a = 1) x5 <- structure("a", b = 1) expect_match(compare(x1, x2)$message, "names for current") expect_match(compare(x2, x3)$message, "Names: 1 string mismatch") expect_match(compare(x1, x4)$message, "target is NULL") expect_match(compare(x4, x5)$message, "Names: 1 string mismatch") }) test_that("two identical vectors are the same", { expect_true(compare(letters, letters)$equal) }) test_that("equal if both missing or both the same (multiple values)", { expect_true(compare(c("ABC", NA), c("ABC", NA))$equal) expect_false(compare(c(NA, NA), c("ABC", NA))$equal) expect_false(compare(c("AB", NA), c("ABC", NA))$equal) expect_false(compare(c("AB", "AB"), c("ABC", "AB"))$equal) }) test_that("computes correct number of mismatches", { x <- mismatch_character(c("a", "b", "c"), c("c", "d", "e")) expect_equal(x$n, 3) }) test_that("only differences are shown", { x <- mismatch_character(letters, c(letters[-26], "a")) lines <- strsplit(format(x), "\n")[[1]] expect_equal(lines[1], "1/26 mismatches") expect_equal(lines[2], 'x[26]: "z"') }) test_that("not all lines are shown", { a <- "1234567890" b <- paste(rep(a, 10), collapse = "") x <- mismatch_character(a, b) lines <- strsplit(format(x, width = 16), "\n")[[1]] expect_equal(lines[1], "1/1 mismatches") expect_equal(length(lines), 8) }) test_that("vectors longer than `max_diffs` (#513)", { 
comp <- compare(letters[1:2], LETTERS[1:2], max_diffs = 1) expect_s3_class(comp, "comparison") expect_false(comp$equal) expect_equal(comp$message, "2/2 mismatches\nx[1]: \"a\"\ny[1]: \"A\"") }) # numeric ------------------------------------------------------------------ test_that("numeric types are compatible", { expect_true(compare(1, 1L)$equal) expect_true(compare(1L, 1)$equal) }) test_that("non-numeric types are not compatible", { expect_match(compare(1, "a")$message, "double is not character") }) test_that("base lengths must be identical", { expect_match(compare(1, c(1, 2))$message, "1 is not 2") }) test_that("classes must be identical", { f1 <- factor("a") f2 <- factor("a", ordered = TRUE) expect_match(compare(1L, f1)$message, "\"integer\" is not \"factor\"") expect_match( compare(1L, f2)$message, "\"integer\" is not \"ordered\"/\"factor\"" ) }) test_that("attributes must be identical", { x1 <- 1L x2 <- c(a = 1L) x3 <- c(b = 1L) x4 <- structure(1L, a = 1) x5 <- structure(1L, b = 1) expect_match(compare(x1, x2)$message, "names for current") expect_match(compare(x2, x3)$message, "Names: 1 string mismatch") expect_match(compare(x1, x4)$message, "target is NULL") expect_match(compare(x4, x5)$message, "Names: 1 string mismatch") }) test_that("unless check.attributes is FALSE", { x1 <- 1L x2 <- c(a = 1L) x3 <- structure(1L, a = 1) expect_equal(compare(x1, x2, check.attributes = FALSE)$message, "Equal") expect_equal(compare(x1, x3, check.attributes = FALSE)$message, "Equal") expect_equal(compare(x2, x3, check.attributes = FALSE)$message, "Equal") }) test_that("two identical vectors are the same", { expect_true(compare(1:10, 1:10)$equal) }) test_that("named arguments to all.equal passed through", { expect_equal(415, 416, tolerance = 0.01) }) test_that("tolerance used for individual comparisons", { x1 <- 1:3 x2 <- x1 + c(0, 0, 0.1) expect_false(compare(x1, x2)$equal) expect_true(compare(x1, x2, tolerance = 0.1)$equal) }) test_that("mismatch_numeric truncates diffs", { 
x <- mismatch_numeric(1:11, 11:1) expect_equal(x$n, 11) expect_equal(x$n_diff, 10) lines <- strsplit(format(x, max_diffs = 5), "\n")[[1]] expect_equal(length(lines), 5 + 2) }) # time -------------------------------------------------------------------- test_that("both POSIXt classes are compatible", { x1 <- Sys.time() x2 <- as.POSIXlt(x1) expect_true(compare(x1, x2)$equal) expect_true(compare(x2, x1)$equal) }) test_that("other classes are not", { expect_match( compare(Sys.time(), 1)$message, "\"POSIXct\"/\"POSIXt\" is not \"numeric\"" ) }) test_that("base lengths must be identical", { x1 <- Sys.time() x2 <- c(x1, x1 - 3600) expect_match(compare(x1, x2)$message, "1 is not 2") }) test_that("tzones must be identical", { # skip on minimal setups tryCatch( { t1 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "EST") t2 <- ISOdatetime(2016, 2, 29, 12, 13, 14, "US/Eastern") }, warning = function(w) skip(conditionMessage(w)) ) expect_match(compare(t1, t2)$message, '"tzone": 1 string mismatch') }) test_that("two identical vectors are the same", { x <- Sys.time() expect_true(compare(x, x)$equal) }) test_that("two different values are not the same", { x1 <- Sys.time() x2 <- x1 + 3600 expect_false(compare(x1, x2)$equal) }) test_that("uses all.equal tolerance", { x1 <- structure(1457284588.83749, class = c("POSIXct", "POSIXt")) x2 <- structure(1457284588.837, class = c("POSIXct", "POSIXt")) expect_true(compare(x1, x2)$equal) }) testthat/tests/testthat/test-verify-conditions-lines.txt0000644000176200001440000000023515127731052023420 0ustar liggesusers> message("First.\nSecond.") Message: First. Second. > warning("First.\nSecond.") Warning: First. Second. > stop("First.\nSecond.") Error: First. Second. 
testthat/tests/testthat/test-test-that.R0000644000176200001440000000731315072252215020136 0ustar liggesuserstest_that("can't access variables from other tests (1)", { a <- 10 expect_true(TRUE) }) test_that("can't access variables from other tests (2)", { expect_false(exists("a")) }) test_that("messages are suppressed", { local_edition(2) message("YOU SHOULDN'T SEE ME") pass() }) test_that("errors are captured", { f <- function() g() g <- function() stop("I made a mistake", call. = FALSE) reporter <- with_reporter("silent", { test_that("", { f() }) }) expect_equal(length(reporter$expectations()), 1) }) test_that("errors captured even when looking for messages", { reporter <- with_reporter("silent", { test_that("", { expect_message(stop("a")) }) }) expect_equal(length(reporter$expectations()), 1) expect_true(expectation_error(reporter$expectations()[[1L]])) }) test_that("errors captured even when looking for warnings", { reporter <- with_reporter("silent", { test_that("", { expect_warning(stop()) }) }) expect_equal(length(reporter$expectations()), 1) expect_true(expectation_error(reporter$expectations()[[1L]])) }) test_that("failures are errors", { f <- function() { expect_true(FALSE) expect_false(TRUE) } expect_error(f(), class = "expectation_failure") }) test_that("infinite recursion is captured", { skip_on_covr() f <- function() f() reporter <- with_reporter("silent", { withr::with_options( list(expressions = sys.nframe() + 100), test_that("", { f() }) ) }) expect_equal(length(reporter$expectations()), 1) }) test_that("return value from test_that", { with_reporter( "", success <- test_that("success", { pass() }) ) expect_true(success) with_reporter( "", success <- test_that("success", { succeed("Yes!") }) ) expect_true(success) with_reporter( "", error <- test_that("error", { barf }) ) expect_false(error) with_reporter( "", failure <- test_that("failure", { expect_true(FALSE) }) ) expect_false(failure) with_reporter( "", failure <- test_that("failure", { fail() }) 
) expect_false(failure) with_reporter( "", success <- test_that("failure", { fail() }) ) expect_false(success) with_reporter( "", skip <- test_that("skip", { skip("skipping") }) ) expect_false(skip) }) test_that("empty test skips automatically", { expectations <- capture_expectations(skip <- test_that("success", {})) expect_false(skip) expect_s3_class(expectations[[1]], "expectation_skip") }) test_that("nested tests skipped correctly", { expectations <- capture_expectations({ describe("outer", { it("1") it("2", expect_true(TRUE)) }) }) expect_length(expectations, 2) expect_s3_class(expectations[[1]], "expectation_skip") expect_s3_class(expectations[[2]], "expectation_success") }) test_that("can signal warnings and messages without restart", { expect_null(signalCondition(message_cnd("foo"))) return("Skipping following test because it verbosely registers the warning") expect_null(signalCondition(warning_cnd("foo"))) }) test_that("no braces required in testthat 2e", { local_edition(2) expect_warning( test_that("", expect_true(TRUE)), NA ) }) test_that("missing packages cause a skip on CRAN", { local_on_cran(TRUE) expectations <- capture_expectations(test_that("", { library(notinstalled) })) expect_length(expectations, 1) expect_s3_class(expectations[[1]], "expectation_skip") local_on_cran(FALSE) expectations <- capture_expectations(test_that("", { library(notinstalled) })) expect_length(expectations, 1) expect_s3_class(expectations[[1]], "expectation_error") }) testthat/tests/testthat/test-expect-known.R0000644000176200001440000000703715047715224020654 0ustar liggesuserslocal_edition(2) # expect_known_output ----------------------------------------------------- test_that("uses specified width", { old <- options(width = 20) withr::defer(options(old)) x <- 1:100 expect_known_output(print(x), "width-80.txt") }) test_that("creates file on first run", { file <- withr::local_tempfile() expect_known_output(cat("ok!\n"), file) |> expect_success() |> expect_warning("Creating 
reference") expect_true(file.exists(file)) }) test_that("ignores incomplete last line", { file <- withr::local_tempfile() writeLines("Hi!", file) expect_success(expect_known_output(cat("Hi!"), file)) expect_success(expect_known_output(cat("Hi!\n"), file)) expect_failure(expect_known_output(cat("Hi!\n\n"), file)) expect_failure(expect_known_output(cat("oops"), file)) }) test_that("updates by default", { file <- withr::local_tempfile() writeLines("Hi!", file) expect_failure(expect_known_output(cat("oops"), file, update = FALSE)) expect_equal(readLines(file), "Hi!") expect_failure(expect_known_output(cat("oops"), file, update = TRUE)) expect_success(expect_known_output(cat("oops"), file)) }) test_that("works with utf-8 output", { skip_on_cran() skip_on_os("windows") text <- c("\u00fc", "\u2a5d", "\u6211", "\u0438") expect_known_output(cat(text, sep = "\n"), "test-expect-known.txt") }) test_that("Warning for non-UTF-8 reference files", { x <- "\xe9\xe1\xed\xf6\xfc" Encoding(x) <- "latin1" tmp <- tempfile() withr::defer(unlink(tmp)) writeBin(x, tmp) suppressWarnings( expect_failure( expect_known_output("foobar", tmp, update = FALSE) ) ) }) # expect_known_value ------------------------------------------------------ test_that("correctly matches to a file", { x <- 1 expect_success(expect_known_value(x, "one.rds")) x <- 2 expect_failure(expect_known_value(x, "one.rds", update = FALSE)) }) test_that("first run is successful", { expect_known_value(2, "two.rds") |> expect_success() |> expect_warning("Creating reference") unlink("two.rds") }) test_that("equal_to_ref does not overwrite existing", { tmp_rds <- tempfile(fileext = ".rds") withr::defer(unlink(tmp_rds)) ref_obj1 <- 1:3 ref_obj2 <- 2:4 saveRDS(ref_obj1, tmp_rds) expect_success(expect_equal_to_reference(ref_obj1, tmp_rds)) # Failure does not update object expect_failure(expect_equal_to_reference(ref_obj2, tmp_rds)) expect_equal(readRDS(tmp_rds), ref_obj1) # Now failure does update object 
expect_failure(expect_equal_to_reference(ref_obj2, tmp_rds, update = TRUE)) expect_success(expect_equal_to_reference(ref_obj2, tmp_rds)) }) test_that("serializes to version 2 by default", { tmp_rds <- tempfile(fileext = ".rds") withr::defer(unlink(tmp_rds)) expect_warning( expect_known_value("a", tmp_rds), "Creating reference" ) expect_identical(tools:::get_serialization_version(tmp_rds)[[1]], 2L) }) test_that("version 3 is possible", { tmp_rds <- tempfile(fileext = ".rds") withr::defer(unlink(tmp_rds)) expect_warning( expect_known_value("a", tmp_rds, version = 3), "Creating reference" ) expect_identical(tools:::get_serialization_version(tmp_rds)[[1]], 3L) }) # expect_known_hash ------------------------------------------------------- test_that("empty hash succeeds with warning", { expect_known_hash(1:10) |> expect_success() |> expect_warning("No recorded hash") }) test_that("only succeeds if hash is correct", { expect_success(expect_known_hash(1:10, "c08951d2c2")) expect_failure(expect_known_hash(1:10, "c08951d2c3")) }) testthat/tests/testthat/context.R0000644000176200001440000000050314313315375016726 0ustar liggesuserscontext("First context.") test_that("Logical equivalence", { x <- TRUE expect_equal(x, TRUE) }) test_that("Numerical equivalence", { x <- 1 expect_equal(x, 1) }) context("Second context.") test_that("A passing test", { expect_equal(TRUE, TRUE) }) test_that("A failing test", { expect_equal(TRUE, FALSE) }) testthat/tests/testthat/test-watcher.R0000644000176200001440000000504015047427364017664 0ustar liggesuserstest_that("compare state works correctly", { loc <- withr::local_tempfile(pattern = "watcher") dir.create(loc) empty <- dir_state(loc) expect_equal(length(empty), 0) file.create(file.path(loc, "test-1.txt")) one <- dir_state(loc) expect_equal(length(one), 1) expect_equal(basename(names(one)), "test-1.txt") diff <- compare_state(empty, one) expect_equal(diff$n, 1) expect_equal(basename(diff$added), "test-1.txt") write.table(mtcars, file.path(loc, 
"test-1.txt")) diff <- compare_state(one, dir_state(loc)) expect_equal(diff$n, 1) expect_equal(basename(diff$modified), "test-1.txt") file.rename(file.path(loc, "test-1.txt"), file.path(loc, "test-2.txt")) diff <- compare_state(one, dir_state(loc)) expect_equal(diff$n, 2) expect_equal(basename(diff$deleted), "test-1.txt") expect_equal(basename(diff$added), "test-2.txt") diff <- compare_state( c(file1 = "62da2", file2 = "e14a6", file3 = "6e6dd"), c(file1 = "62da2", file2 = "e14a6", file21 = "532fa", file3 = "3f4sa") ) expect_equal(diff$n, 2) expect_equal(basename(diff$added), "file21") expect_equal(basename(diff$modified), "file3") }) test_that("watcher works correctly", { skip_on_ci() skip_on_os("windows") skip_on_cran() if (Sys.which("bash") == "") { skip("bash not available") } if (system("bash -c 'which touch'", ignore.stdout = TRUE) != 0L) { skip("touch (or which) not available") } loc <- withr::local_tempfile(pattern = "watcher") dir.create(loc) code_path <- file.path(loc, "R") test_path <- file.path(loc, "tests") dir.create(code_path) dir.create(test_path) delayed.bash.cmd <- function(command) { system(paste0("bash -c 'sleep 1;", command, "'"), wait = FALSE) } add.code.file <- function(file.name) { delayed.bash.cmd(paste0("touch ", file.path(code_path, file.name))) } remove.code.file <- function(file.name) { delayed.bash.cmd(paste0("rm ", file.path(code_path, file.name))) } test.added <- function(added, deleted, modified) { expect_equal(length(added), 1) expect_equal(grepl("test1.R", added), TRUE) expect_equal(length(deleted), 0) expect_equal(length(modified), 0) FALSE } test.removed <- function(added, deleted, modified) { expect_equal(length(added), 0) expect_equal(length(deleted), 1) expect_equal(grepl("test1.R", deleted), TRUE) expect_equal(length(modified), 0) FALSE } add.code.file("test1.R") watch(c(code_path, test_path), test.added) remove.code.file("test1.R") watch(c(code_path, test_path), test.removed) }) 
testthat/tests/testthat/test-reporter-stop.R0000644000176200001440000000170315056632045021050 0ustar liggesusers# We can't use expect_snapshot_reporter() here because it uses test_one_file() # which wraps code in `test_code()` which turns the error into a test failure # It also only captures the output, but we also want to see the error test_that("produces useful output", { run_tests <- \() source(test_path("reporters/tests.R")) expect_snapshot(with_reporter("stop", run_tests()), error = TRUE) }) test_that("can suppress praise", { run_tests <- \() source(test_path("reporters/successes.R")) expect_silent(with_reporter(StopReporter$new(praise = FALSE), run_tests())) }) test_that("works nicely with nested tests", { run_tests <- \() source(test_path("reporters/nested.R")) expect_snapshot(with_reporter("stop", run_tests()), error = TRUE) }) test_that("errors when needed", { r <- StopReporter$new() r$start_test() expect_no_error(r$end_test()) r$start_test() r$n_fail <- 1 r$n_success <- 0 expect_snapshot(error = TRUE, r$end_test()) }) testthat/tests/testthat/test-colour-text.R0000644000176200001440000000100315047715224020502 0ustar liggesuserstest_that("can suppress colours", { op <- options( crayon.enabled = TRUE, testthat.use_colours = TRUE, cli.num_colors = 8L ) check <- cli::ansi_has_any(colourise("X")) # Must restore original options before expectation is triggered options(op) expect_true(check) }) test_that("We don't have colours if we don't want to", { op <- options( crayon.enabled = TRUE, testthat.use_colours = FALSE ) check <- cli::ansi_has_any(colourise("X")) options(op) expect_false(check) }) testthat/tests/testthat/test-expect-inheritance.R0000644000176200001440000001045615054145645022012 0ustar liggesuserstest_that("expect_type checks typeof", { expect_success(expect_type(factor("a"), "integer")) x <- factor("a") expect_snapshot_failure(expect_type(x, "double")) }) test_that("expect_type validates its inputs", { expect_snapshot(error = TRUE, { expect_type(1, 
c("integer", "double")) }) }) test_that("expect_is checks class", { local_edition(2) expect_success(expect_is(factor("a"), "factor")) expect_snapshot_failure(expect_is(factor("a"), "integer")) }) test_that("expect_s3/s4_class fails if appropriate type", { A <- methods::setClass("A", contains = "list") x1 <- 1 x2 <- A() x3 <- factor("a") expect_snapshot_failure(expect_s3_class(x1, "double")) expect_snapshot_failure(expect_s3_class(x2, "double")) expect_snapshot_failure(expect_s4_class(x3, "double")) }) test_that("expect_s[34]_class can check not S3/S4", { expect_success(expect_s3_class(1, NA)) expect_snapshot_failure(expect_s3_class(factor(), NA)) A <- methods::setClass("A", contains = "list") expect_success(expect_s4_class(1, NA)) expect_snapshot_failure(expect_s4_class(A(), NA)) }) test_that("test_s4_class respects class hierarchy", { A <- methods::setClass("A", contains = "list") B <- methods::setClass("B", contains = "list") C <- methods::setClass("C", contains = c("A", "B")) withr::defer({ methods::removeClass("A") methods::removeClass("B") methods::removeClass("C") }) expect_success(expect_s4_class(C(), "A")) expect_success(expect_s4_class(C(), "B")) expect_snapshot_failure(expect_s4_class(C(), "D")) }) test_that("expect_s3_class validates its inputs", { expect_snapshot(error = TRUE, { expect_s3_class(factor("a"), 1) expect_s3_class(factor("a"), "factor", exact = "yes") }) }) test_that("test_s3_class respects class hierarchy", { x <- structure(list(), class = c("a", "b")) expect_success(expect_s3_class(x, "a")) expect_success(expect_s3_class(x, "b")) expect_snapshot_failure(expect_s3_class(x, "c")) expect_snapshot_failure(expect_s3_class(x, c("c", "d"))) }) test_that("test_s3_class can request exact match", { x <- structure(list(), class = c("a", "b")) expect_snapshot_failure(expect_s3_class(x, "a", exact = TRUE)) expect_success(expect_s3_class(x, c("a", "b"), exact = TRUE)) }) test_that("expect_s3_class allows unquoting of first argument", { f <- factor("a") 
expect_success(expect_s3_class(!!rlang::quo(f), "factor")) }) test_that("expect_s4_class allows unquoting of first argument", { cls <- methods::setClass("new_class", slots = c("a" = "numeric")) obj <- methods::new("new_class", a = 3) expect_success(expect_s4_class(!!rlang::quo(obj), "new_class")) }) test_that("expect_s4_class validates its inputs", { expect_snapshot(error = TRUE, { expect_s4_class(factor("a"), 1) }) }) # expect_r6_class -------------------------------------------------------- test_that("expect_r6_class succeeds when object inherits from expected class", { Person <- R6::R6Class("Person") Student <- R6::R6Class("Student", inherit = Person) person <- Person$new() student <- Student$new() expect_success(expect_r6_class(person, "Person")) expect_success(expect_r6_class(student, "Student")) expect_success(expect_r6_class(student, "Person")) }) test_that("expect_r6_class generates useful failures", { x <- 1 person <- R6::R6Class("Person")$new() expect_snapshot_failure({ expect_r6_class(x, "Student") expect_r6_class(person, "Student") }) }) test_that("expect_r6_class validates its inputs", { expect_snapshot(error = TRUE, { expect_r6_class(1, c("Person", "Student")) }) }) # expect_s7_class -------------------------------------------------------- test_that("can check with actual class", { Foo <- S7::new_class("Foo", package = NULL) Bar <- S7::new_class("Bar", package = NULL) expect_success(expect_s7_class(Foo(), class = Foo)) expect_snapshot_failure(expect_s7_class(Foo(), class = Bar)) Baz <- S7::new_class("Baz", parent = Foo, package = NULL) expect_snapshot_failure(expect_s7_class(Baz(), class = Bar)) }) test_that("informative failure if not S7", { Foo <- S7::new_class("Foo", package = NULL) x <- factor() expect_snapshot_failure(expect_s7_class(x, Foo)) }) test_that("expect_s7_class validates its inputs", { skip_if_not_installed("S7") expect_snapshot(expect_s7_class(1, 1), error = TRUE) }) 
testthat/tests/testthat/test-snapshot-cleanup.R0000644000176200001440000000363414165635513021517 0ustar liggesuserstest_that("snapshot cleanup makes nice message if needed", { dir <- local_snap_dir(c("a.md", "b.md")) expect_snapshot({ snapshot_cleanup(dir) snapshot_cleanup(dir, c("a", "b")) }) }) test_that("deletes empty dirs", { dir <- local_snap_dir(character()) dir.create(file.path(dir, "a", "b", "c"), recursive = TRUE) dir.create(file.path(dir, "b"), recursive = TRUE) dir.create(file.path(dir, "c"), recursive = TRUE) snapshot_cleanup(dir) expect_equal(dir(dir), character()) }) test_that("detects outdated snapshots", { dir <- local_snap_dir(c("a.md", "b.md", "b.new.md")) expect_equal(snapshot_outdated(dir, c("a", "b")), character()) expect_equal(snapshot_outdated(dir, "a"), c("b.md", "b.new.md")) expect_equal(snapshot_outdated(dir, "b"), "a.md") expect_equal(snapshot_outdated(dir), c("a.md", "b.md", "b.new.md")) }) test_that("preserves variants", { dir <- local_snap_dir(c("a.md", "windows/a.md", "windows/b.md")) expect_equal(snapshot_outdated(dir, "a"), "windows/b.md") # Doesn't delete new files in variants dir <- local_snap_dir(c("a.md", "windows/a.md", "windows/a.new.md")) expect_equal(snapshot_outdated(dir, "a"), character()) }) test_that("detects outdated snapshot files", { dir <- local_snap_dir(c("a/foo.txt", "b/foo.txt", "b/foo.new.txt")) expect_equal( snapshot_outdated(dir, character(), character()), c("a/foo.txt", "b/foo.new.txt", "b/foo.txt") ) expect_equal( snapshot_outdated(dir, character(), "a/foo.txt"), c("b/foo.new.txt", "b/foo.txt") ) expect_equal( snapshot_outdated(dir, character(), "b/foo.txt"), "a/foo.txt" ) expect_equal( snapshot_outdated(dir, character(), c("a/foo.txt", "b/foo.txt")), character() ) }) test_that("detects individual snapshots files to remove", { dir <- local_snap_dir(c("a/a1", "a/a2", "b/b1")) expect_equal( snapshot_outdated(dir, c("a", "b"), "a/a1"), c("a/a2", "b/b1") ) }) 
testthat/tests/testthat/test-expect-invisible.R0000644000176200001440000000112115054145645021472 0ustar liggesuserstest_that("basic principles of visibility hold", { expect_success(expect_invisible(x <- 10)) expect_snapshot_failure(expect_invisible(x)) expect_success(expect_visible(x)) expect_snapshot_failure(expect_visible(x <- 1)) }) test_that("generates useful failure messages", { expect_snapshot_failure(expect_visible(invisible(1))) expect_snapshot_failure(expect_invisible(1)) }) test_that("invisibly returns evaluated value", { out <- expect_invisible(expect_invisible(x <- 2 + 2)) expect_equal(out, 4) out <- expect_invisible(expect_visible(2 + 2)) expect_equal(out, 4) }) testthat/tests/testthat/test-parallel-errors.R0000644000176200001440000000116515047715224021334 0ustar liggesuserstest_that("error in parallel setup code", { skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( capture.output(suppressMessages(testthat::test_local( test_path("test-parallel", "syntax-error"), reporter = "summary" ))), error = function(e) e ) expect_s3_class(err, "testthat_process_error") # contains test file's name expect_match(conditionMessage(err), "test-error-1.R") # contains original error message expect_match(conditionMessage(err), "unexpected symbol") # contains the text of the syntax error expect_match(conditionMessage(err), "but this") }) testthat/tests/testthat/test-verify-conditions-cr.txt0000644000176200001440000000002015127731052022702 0ustar liggesusers> cat("\r\n") testthat/tests/testthat/test-old-school.R0000644000176200001440000000170115047715224020265 0ustar liggesuserstest_that("old school types still work", { local_edition(2L) expect_success(expect_that(1L, is_a("integer"))) }) test_that("old school names still work", { local_edition(2L) expect_success(expect_that("a", has_names(NULL))) }) test_that("old school comparisons still work", { local_edition(2L) expect_success(expect_that(10, is_less_than(11))) expect_failure(expect_that(10, 
is_more_than(11))) }) test_that("old school equality tests still work", { local_edition(2L) expect_success(expect_that(10, equals(10))) expect_success(expect_that(10, is_identical_to(10))) expect_success(expect_that(10, is_equivalent_to(10))) }) test_that("old school output tests still work", { local_edition(2L) expect_success(expect_that(stop("!"), throws_error())) expect_success(expect_that(warning("!"), gives_warning())) expect_success(expect_that(message("!"), shows_message())) expect_success(expect_that(print("!"), prints_text())) }) testthat/tests/testthat/test-snapshot-value.R0000644000176200001440000000324115054053615021171 0ustar liggesuserstest_that("can snapshot values", { x <- list("a", 1.5, 1L, TRUE) expect_snapshot_value(x, style = "json") expect_snapshot_value(x, style = "json2") expect_snapshot_value(x, style = "deparse") expect_snapshot_value(x, style = "serialize") }) test_that("can control snapshot value details", { expect_snapshot_value(1.2, tolerance = 0.1) # including through ... f <- ~1 expect_snapshot_value(f, style = "serialize", ignore_formula_env = TRUE) }) test_that("tolerance passed to check_roundtrip", { expect_snapshot_value(0.900000000000001, style = "json") }) test_that("reparse handles common cases", { roundtrip <- function(x) reparse(deparse(x)) expect_equal(roundtrip(-1), -1) expect_equal(roundtrip(c(1, 2, 3)), c(1, 2, 3)) expect_equal(roundtrip(list(1, 2, 3)), list(1, 2, 3)) expect_equal(roundtrip(mtcars), mtcars) expect_equal(roundtrip(1:10), 1:10) expect_equal(roundtrip(numeric()), numeric()) f <- function(x) x + 1 expect_equal(roundtrip(f), f, ignore_function_env = TRUE) }) test_that("errors if can't roundtrip", { snapper <- local_test_snapshotter() snapper$start_file("snapshot-4", "test") expect_error(expect_snapshot_value(NULL), "safely serialized") }) test_that("check_roundtrip() gives nice error", { # disable crayon usage local_bindings(crayon = FALSE, .env = get_reporter()) wrapper <- function(...) check_roundtrip(...) 
expect_snapshot( wrapper(NULL, list(), label = "foo", style = "json"), error = TRUE ) }) test_that("expect_snapshot_value validates its inputs", { expect_snapshot(error = TRUE, { expect_snapshot_value(123, cran = "yes") expect_snapshot_value(123, tolerance = "high") }) }) testthat/tests/testthat/test-parallel-outside.R0000644000176200001440000000053415054053615021470 0ustar liggesuserstest_that("error outside of test_that()", { skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( capture.output(suppressMessages(testthat::test_local( test_path("test-parallel", "outside"), reporter = "summary" ))), error = function(e) e ) expect_match(err$message, "Test failures") }) testthat/tests/testthat/test-list-reporter/0000755000176200001440000000000015130237654020712 5ustar liggesuserstestthat/tests/testthat/test-list-reporter/test-bare-expectations.R0000644000176200001440000000017614313315360025424 0ustar liggesuserstest_that("before", expect_true(TRUE)) # this is a bare expectation expect_true(TRUE) test_that("after", expect_true(TRUE)) testthat/tests/testthat/test-list-reporter/test-only-error.R0000644000176200001440000000003714313315360024113 0ustar liggesusersstop('dying outside of tests') testthat/tests/testthat/test-list-reporter/test-exception-outside-tests.R0000644000176200001440000000034114313315360026611 0ustar liggesusers# the objective is to test what happens if some code fails outside of tests # i.e. not inside a test_that() call. 
test_that("before", expect_true(TRUE)) stop('dying outside of tests') test_that("after", expect_true(TRUE)) testthat/tests/testthat/test-list-reporter/test-exercise-list-reporter.R0000644000176200001440000000034414313315360026424 0ustar liggesuserstest_that("test1", expect_true(TRUE)) test_that("test2", expect_true(TRUE)) test_that("test-pass", expect_true(TRUE)) test_that("test-fail", expect_true(FALSE)) test_that("test-error", { stop('argh') expect_true(TRUE) }) testthat/tests/testthat/test-verify-unicode-true.txt0000644000176200001440000000007315127731052022542 0ustar liggesusers> cat(cli::symbol$info, cli::symbol$cross, "\n") ℹ ✖ testthat/tests/testthat/one.rds0000644000176200001440000000005414313316267016414 0ustar liggesusersb```b`fdd`b2Ctestthat/tests/testthat/test-reporter-tap.R0000644000176200001440000000011714164710003020633 0ustar liggesuserstest_that("reporter works", { expect_snapshot_reporter(TapReporter$new()) }) testthat/tests/testthat/test-reporter-summary.R0000644000176200001440000000061115047715224021556 0ustar liggesuserstest_that("can control appearance of dots", { expect_snapshot_reporter(SummaryReporter$new( show_praise = FALSE, omit_dots = FALSE )) expect_snapshot_reporter(SummaryReporter$new( show_praise = FALSE, omit_dots = TRUE )) }) test_that("can control maximum reports", { expect_snapshot_reporter(SummaryReporter$new( show_praise = FALSE, max_reports = 2 )) }) testthat/tests/testthat/test-reporter-minimal.R0000644000176200001440000000013114164710003021471 0ustar liggesuserstest_that("reporter as expected", { expect_snapshot_reporter(MinimalReporter$new()) }) testthat/tests/testthat/test-evaluate-promise.R0000644000176200001440000000113115040747540021500 0ustar liggesuserstest_that("captures warnings, messages and output", { out <- evaluate_promise({ message("m", appendLF = FALSE) warning("w") cat("out") }) expect_equal(out$output, "out") expect_equal(out$messages, "m") expect_equal(out$warnings, "w") }) test_that("capture_warnings 
captures warnings", { out <- capture_warnings({ warning("a") warning("b") }) expect_equal(out, c("a", "b")) }) test_that("capture_messages captures messages", { out <- capture_messages({ message("a") message("b") }) expect_equal(out, c("a\n", "b\n")) # message adds LF by default }) testthat/tests/testthat/test-expect-setequal.R0000644000176200001440000001131115104404205021315 0ustar liggesusers# setequal ---------------------------------------------------------------- test_that("ignores order and duplicates", { expect_success(expect_setequal(letters, rev(letters))) expect_success(expect_setequal(c("a", "a", "b"), c("b", "b", "a"))) }) test_that("checks both directions of containment", { expect_failure(expect_setequal(letters, letters[-1])) expect_failure(expect_setequal(letters[-1], letters)) }) test_that("truncates long differences", { cnd <- catch_cnd(expect_setequal("a", letters)) expect_match(cnd$message, "...") }) test_that("can compare data frames", { # this isn't really a legit use case but one package does it df <- data.frame(x = 1:10, y = 10:1) expect_success(expect_setequal(unname(df), unname(df))) }) test_that("warns if both inputs are named", { expect_snapshot(expect_setequal(c(a = 1), c(b = 1))) }) test_that("checks inputs", { fun <- sum expect_snapshot(error = TRUE, { expect_setequal(sum, 1) expect_setequal(1, sum) expect_setequal(!!fun, 1) expect_setequal(1, !!fun) }) }) test_that("useful message on failure", { expect_snapshot_failure(expect_setequal("actual", "expected")) x <- 1:2 y <- 2 expect_snapshot_failure(expect_setequal(x, y)) x <- 2 y <- 2:3 expect_snapshot_failure(expect_setequal(x, y)) x <- 1:2 y <- 2:3 expect_snapshot_failure(expect_setequal(x, y)) # doesn't repeat values x <- c("a", "a") y <- c("b", "b", "b") expect_snapshot_failure(expect_setequal(x, y)) # still looks good when expected is inlined x <- c("a", "b", "c") expect_snapshot_failure(expect_setequal(x, c("a", "b", "c", "d"))) }) test_that("truncates long vectors", { x <- 1:2 y 
<- 1:50 expect_snapshot_failure(expect_setequal(x, y)) }) # mapequal ---------------------------------------------------------------- test_that("ignores order", { expect_success(expect_mapequal(list(a = 1, b = 2), list(b = 2, a = 1))) expect_success(expect_mapequal(c(a = 1, b = 2), c(b = 2, a = 1))) }) test_that("fails if names don't match", { x <- list(a = 1, b = 2) y <- list(a = 1) expect_snapshot_failure(expect_mapequal(x, y)) expect_snapshot_failure(expect_mapequal(y, x)) }) test_that("fails if values don't match", { x <- list(a = 1, b = 2) y <- list(a = 1, b = 3) expect_snapshot_failure(expect_mapequal(x, y)) }) test_that("NULLs are not dropped", { expect_success(expect_mapequal(list(a = 1, b = NULL), list(b = NULL, a = 1))) }) test_that("warns if empty vector", { expect_snapshot(expect_success(expect_mapequal(list(), list()))) }) test_that("ignores integer/numeric differences", { expect_success(expect_mapequal(list(a = 1L), list(a = 1))) }) test_that("uses equality behaviour of current edition", { local_edition(2) expect_success(expect_mapequal(c(a = 1), c(a = 1L))) }) test_that("validates its inputs", { unnamed <- list(1) named <- list(a = 1) duplicated <- list(x = 1, x = 2) expect_snapshot(error = TRUE, { expect_mapequal(sum, named) expect_mapequal(named, sum) expect_mapequal(unnamed, named) expect_mapequal(named, unnamed) expect_mapequal(named, duplicated) expect_mapequal(duplicated, named) }) }) # contains ---------------------------------------------------------------- test_that("expect_contains() succeeds when appropriate", { expect_success(expect_contains(letters, "a")) expect_success(expect_contains(letters, letters)) expect_success(expect_contains(letters, character())) }) test_that("expect_contains() gives useful message on failure", { x1 <- c("a", "b", "c") x2 <- c("c", "d") x3 <- c("d", "e") expect_snapshot_failure(expect_contains(x1, x2)) expect_snapshot_failure(expect_contains(x1, x3)) }) # in 
---------------------------------------------------------------- test_that("expect_in() succeeds when appropriate", { expect_success(expect_in("a", letters)) expect_success(expect_in(letters, letters)) expect_success(expect_in(character(), letters)) }) test_that("expect_in() gives useful message on failure", { x1 <- c("a", "b") x2 <- c("b", "c") x3 <- c("d", "e") expect_snapshot_failure(expect_in(x1, x2)) expect_snapshot_failure(expect_in(x1, x3)) }) # disjoint ---------------------------------------------------------------- test_that("expect_disjoint() succeeds when appropriate", { expect_success(expect_disjoint(1, letters)) expect_success(expect_disjoint(LETTERS, letters)) expect_success(expect_disjoint(character(), letters)) }) test_that("expect_disjoint() gives useful message on failure", { x1 <- c("a", "b", "c") x2 <- c("c", "d") x3 <- c("b", "c", "d") expect_snapshot_failure(expect_disjoint(x1, x2)) expect_snapshot_failure(expect_disjoint(x1, x3)) expect_snapshot_failure(expect_disjoint(NA, c("a", NA))) }) testthat/tests/testthat/teardown.R0000644000176200001440000000002414313316267017064 0ustar liggesusersunlink("DELETE-ME") testthat/tests/testthat/test-extract.R0000644000176200001440000000301715104635341017671 0ustar liggesuserstest_that("can extract test from file", { exprs <- parse_file(test_path("extract/simple.R")) dir <- withr::local_tempdir() out_path <- save_test(attr(exprs, "srcref")[[1]], dir = dir) expect_snapshot(base::writeLines(readLines(out_path))) }) # extract_test_lines ----------------------------------------------------------- test_that("can include test env setup", { # fmt: skip exprs <- parse_text(" test_that('foo', { expect_true(TRUE) }) ") expect_snapshot(base::writeLines(extract_test_lines(exprs, 2, "test"))) }) test_that("can extract prequel", { # fmt: skip exprs <- parse_text(" x <- 1 y <- 2 test_that('foo', { expect_true(TRUE) }) ") expect_snapshot(base::writeLines(extract_test_lines(exprs, 4))) }) test_that("preserves code format 
but not comments", { # fmt: skip exprs <- parse_text(" 1 + 1 # 2 test_that('foo', { 2 + 2 # 4 }) ") expect_snapshot(base::writeLines(extract_test_lines(exprs, 3))) }) test_that("can extract selected expectation", { # fmt: skip exprs <- parse_text(" test_that('foo', { expect_true(TRUE) expect_false(FALSE) }) ") expect_snapshot(base::writeLines(extract_test_lines(exprs, 2))) }) test_that("errors if can't find test", { # fmt: skip exprs <- parse_text(" # line 1 test_that('foo', { expect_true(TRUE) }) # line 5 ") expect_error(extract_test_lines(exprs, 1), "Failed to find test") expect_error(extract_test_lines(exprs, 5), "Failed to find test") }) testthat/tests/testthat/test-test-env.R0000644000176200001440000000161315047715224017771 0ustar liggesuserstest_that("environment has package name", { expect_equal(methods::getPackageName(test_env("testthat")), "testthat") expect_equal(methods::getPackageName(topenv()), "testthat") }) setClass("MyClass") test_that("Cannot create S4 class without special behaviour", { expect_no_error(setClass("MyClass2")) }) test_that("is_checking respects env var", { withr::local_envvar(TESTTHAT_IS_CHECKING = "true") expect_true(is_checking()) withr::local_envvar(TESTTHAT_IS_CHECKING = "false") expect_false(is_checking()) }) test_that("is_snapshot() is true in snapshots", { local_edition(3) # why is this needed? 
expect_false(is_snapshot()) expect_snapshot(is_snapshot()) expect_snapshot_value(is_snapshot()) expect_snapshot_output(is_snapshot()) expect_snapshot_warning(if (is_snapshot()) warning("Is snapshotting!")) expect_snapshot_error(if (is_snapshot()) stop("Is snapshotting!")) }) testthat/tests/testthat/test-parallel-setup.R0000644000176200001440000000064315047715224021160 0ustar liggesuserstest_that("error in parallel setup code", { skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( capture.output(suppressMessages(testthat::test_local( test_path("test-parallel", "setup"), reporter = "summary" ))), error = function(e) e ) expect_s3_class(err, "testthat_process_error") expect_match(conditionMessage(err), "Error in setup", fixed = TRUE) }) testthat/tests/testthat/test-expect-equality.R0000644000176200001440000000724215072252215021345 0ustar liggesuserstest_that("basical principles of equality hold", { local_edition(2) expect_success(expect_equal(1, 1)) expect_failure(expect_equal(1, 2)) expect_success(expect_identical(1, 1)) expect_failure(expect_identical(1, 2)) local_edition(3) expect_success(expect_equal(1, 1)) expect_failure(expect_equal(1, 2)) expect_success(expect_identical(1, 1)) expect_failure(expect_identical(1, 2)) }) test_that("expect_equal() ignores numeric type; expect_identical() does not", { local_edition(2) expect_success(expect_equal(1, 1L)) expect_failure(expect_identical(1, 1L)) local_edition(3) expect_success(expect_equal(1, 1L)) expect_failure(expect_identical(1, 1L)) }) test_that("returns value", { one <- 1 local_edition(3) expect_equal(expect_equal(one, one), 1) expect_equal(expect_identical(one, one), 1) local_edition(2) expect_equal(expect_equal(one, one), 1) expect_equal(expect_identical(one, one), 1) }) test_that("can control numeric tolerance", { x1 <- 1 x2 <- x1 + 1e-6 local_edition(2) expect_failure(expect_equal(x1, x2)) expect_success(expect_equal(x1, x2, tolerance = 1e-5)) expect_success(expect_equivalent(x1, 
x2, tolerance = 1e-5)) # with partial matching withr::local_options(warnPartialMatchArgs = FALSE) expect_success(expect_equal(x1, x2, tol = 1e-5)) local_edition(3) expect_failure(expect_equal(x1, x2)) expect_success(expect_equal(x1, x2, tolerance = 1e-5)) }) test_that("second edition only optionally sets tolerance", { local_edition(2) # all.equal.POSIXct sets default tolerance to 0.001 x <- .POSIXct(1) y <- .POSIXct(1 + 1e-4) expect_success(expect_equal(x, y)) }) test_that("provide useful feedback on failure (3e)", { x <- 1 expect_snapshot_failure(expect_identical(x, "a")) expect_snapshot_failure(expect_equal(x, "a")) local_edition(2) withr::local_options(testthat.edition_ignore = TRUE) expect_snapshot_failure(expect_identical(x, "a")) expect_snapshot_failure(expect_equal(x, "a")) }) test_that("correctly spaces lines", { expect_snapshot_failure(expect_equal(list(a = 1), list(a = "b", b = 10))) }) test_that("provide useful feedback on failure (2e)", { local_edition(2) withr::local_options(testthat.edition_ignore = TRUE) x <- 1 expect_snapshot_failure(expect_identical(x, "a")) expect_snapshot_failure(expect_equal(x, "a")) }) test_that("default labels use unquoting", { x <- 1 y <- 2 expect_snapshot_failure(expect_equal(x, !!y)) }) test_that("% is not treated as sprintf format specifier (#445)", { expect_failure(expect_equal("+", "%")) expect_failure(expect_equal("%", "+")) expect_success(expect_equal("%", "%")) }) # 2nd edition --------------------------------------------------- test_that("useful message if objects equal but not identical", { local_edition(2) f <- function() x environment(f) <- new_environment() g <- function() x environment(g) <- new_environment() expect_snapshot_failure(expect_identical(f, g)) }) test_that("attributes for object (#452)", { local_edition(2) oops <- structure(0, oops = "oops") expect_equal(oops, oops) expect_snapshot_failure(expect_equal(oops, 0)) expect_equal(as.numeric(oops), 0) }) test_that("expect_equivalent ignores attributes and 
numeric differences", { local_edition(2) x <- y <- 1 attr(y, "y") <- y expect_success(expect_equivalent(x, y)) expect_success(expect_equivalent(x, 1L)) }) test_that("expect_equivalent returns value", { local_edition(2) one <- 1 expect_equal(expect_equivalent(one, one), 1) }) test_that("expect_equal validates its inputs", { expect_snapshot(error = TRUE, { expect_equal(1, 2, tolerance = "high") expect_equal(1, 2, tolerance = -1) }) }) testthat/tests/testthat/test-source.R0000644000176200001440000001114315071472371017523 0ustar liggesuserstest_that("source_file always uses UTF-8 encoding", { has_locale <- function(l) { has <- TRUE tryCatch( withr::with_locale(c(LC_CTYPE = l), "foobar"), warning = function(w) has <<- FALSE, error = function(e) has <<- FALSE ) has } ## Some text in UTF-8 tmp <- tempfile() withr::defer(unlink(tmp)) utf8 <- as.raw(c( 0xc3, 0xa1, 0x72, 0x76, 0xc3, 0xad, 0x7a, 0x74, 0xc5, 0xb1, 0x72, 0xc5, 0x91, 0x20, 0x74, 0xc3, 0xbc, 0x6b, 0xc3, 0xb6, 0x72, 0x66, 0xc3, 0xba, 0x72, 0xc3, 0xb3, 0x67, 0xc3, 0xa9, 0x70 )) writeBin(c(charToRaw("x <- \""), utf8, charToRaw("\"\n")), tmp) run_test <- function(locale) { if (has_locale(locale)) { env <- new.env() withr::with_locale( c(LC_CTYPE = locale), source_file(tmp, env = env, wrap = FALSE) ) expect_equal(Encoding(env$x), "UTF-8") expect_equal(charToRaw(env$x), utf8) } } ## Try to read it in latin1 and UTF-8 locales ## They have different names on Unix and Windows run_test("en_US.ISO8859-1") run_test("en_US.UTF-8") run_test("English_United States.1252") run_test("German_Germany.1252") run_test(Sys.getlocale("LC_CTYPE")) }) test_that("source_file wraps error", { expect_snapshot(error = TRUE, { source_file(test_path("reporters/error-setup.R"), wrap = FALSE) }) }) test_that("checks its inputs", { expect_snapshot(error = TRUE, { source_file(1) source_file("x") source_file(".", "x") }) }) # filter_desc ------------------------------------------------------------- test_that("works with all subtest types", { code <- 
exprs( test_that("foo", {}), describe("bar", {}), it("baz", {}) ) expect_equal(filter_desc(code, "foo"), code[1]) expect_equal(filter_desc(code, "bar"), code[2]) expect_equal(filter_desc(code, "baz"), code[3]) }) test_that("only returns non-subtest code before subtest", { code <- exprs( f(), test_that("bar", {}), describe("foo", {}), g(), h() ) expect_equal(filter_desc(code, "foo"), code[c(1, 3)]) }) test_that("can select recursively", { code <- exprs( x <- 1, describe("a", { y <- 1 describe("b", { z <- 1 }) y <- 2 }), x <- 2 ) expect_equal( filter_desc(code, c("a", "b")), exprs( x <- 1, describe("a", { y <- 1 describe("b", { z <- 1 }) }) ) ) }) test_that("works on code like the describe() example", { code <- exprs( describe("math library", { x1 <- 1 x2 <- 1 describe("addition()", { it("can add two numbers", { expect_equal(x1 + x2, addition(x1, x2)) }) }) describe("division()", { x1 <- 10 x2 <- 2 it("can divide two numbers", { expect_equal(x1 / x2, division(x1, x2)) }) it("can handle division by 0") #not yet implemented }) }) ) expect_equal( filter_desc( code, c("math library", "division()", "can divide two numbers") ), exprs( describe("math library", { x1 <- 1 x2 <- 1 describe("division()", { x1 <- 10 x2 <- 2 it("can divide two numbers", { expect_equal(x1 / x2, division(x1, x2)) }) }) }) ) ) # what happens for an unimplemented specification? 
expect_snapshot( error = TRUE, filter_desc( code, c("math library", "division()", "can handle division by 0") ) ) }) test_that("preserve srcrefs", { code <- parse( keep.source = TRUE, text = ' test_that("foo", { # this is a comment }) ' ) expect_snapshot(filter_desc(code, "foo")) }) test_that("errors if zero or duplicate labels", { code <- exprs( f(), test_that("baz", {}), test_that("baz", {}), g() ) expect_snapshot(error = TRUE, { filter_desc(code, "baz") filter_desc(code, "missing") }) }) test_that("source_dir()", { res <- source_dir("test_dir", pattern = "hello", chdir = TRUE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir( normalizePath("test_dir"), pattern = "hello", chdir = TRUE, wrap = FALSE ) expect_equal(res[[1]](), "Hello World") res <- source_dir("test_dir", pattern = "hello", chdir = FALSE, wrap = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir( normalizePath("test_dir"), pattern = "hello", chdir = FALSE, wrap = FALSE ) expect_equal(res[[1]](), "Hello World") }) testthat/tests/testthat/test-edition.R0000644000176200001440000000242015047715224017654 0ustar liggesuserstest_that("can locally override edition", { local_edition(3) expect_equal(edition_get(), 3) local_edition(2) expect_equal(edition_get(), 2) }) test_that("checks its inputs", { expect_snapshot(error = TRUE, { local_edition("x") local_edition(5) }) }) test_that("deprecation only fired for newer edition", { local_edition(2) expect_no_warning(edition_deprecate(3, "old stuff")) local_edition(3) expect_snapshot(edition_deprecate(3, "old stuff")) }) test_that("required only fired for older edition", { withr::local_options(testthat.edition_ignore = FALSE) local_edition(2) expect_error(edition_require(3, "new stuff")) withr::local_options(testthat.edition_ignore = FALSE) local_edition(3) expect_no_error(edition_require(3, "new stuff")) }) test_that("edition for testthat is 3", { expect_equal(find_edition(package = "testthat"), 3) }) test_that("edition for 
non-package dir is 2", { withr::local_envvar(TESTTHAT_EDITION = NULL) expect_equal(find_edition(tempdir()), 2) }) test_that("can set the edition via an environment variable", { local_bindings(edition = zap(), .env = the) withr::local_envvar(TESTTHAT_EDITION = 2) expect_equal(edition_get(), 2) withr::local_envvar(TESTTHAT_EDITION = 3) expect_equal(edition_get(), 3) }) testthat/tests/testthat/test-reporter-zzz.R0000644000176200001440000000113315047715224020716 0ustar liggesuserstest_that("can locate reporter from name", { expect_equal(find_reporter("minimal"), MinimalReporter$new()) expect_equal(find_reporter("summary"), SummaryReporter$new()) }) test_that("useful error message if can't find reporter", { expect_snapshot(error = TRUE, { find_reporter(1) find_reporter("blah") find_reporter(c("summary", "blah")) }) }) test_that("character vector yields multi reporter", { expect_equal( find_reporter(c("summary", "stop")), MultiReporter$new( reporters = list( SummaryReporter$new(), StopReporter$new() ) ) ) }) testthat/tests/testthat/test-test-path.R0000644000176200001440000000242715047715224020141 0ustar liggesuserstest_that("always returns a path", { withr::local_envvar(TESTTHAT = "true") withr::local_options(testthat_interactive = FALSE) expect_equal(test_path(), ".") }) test_that("is vectorised", { withr::local_envvar(TESTTHAT = "true") withr::local_options(testthat_interactive = FALSE) expect_equal(test_path("x", c("a", "b")), c("x/a", "x/b")) }) test_that("uses local path when called from test_file()/tools::testInstalledPackages()", { withr::local_envvar(TESTTHAT = "true") withr::local_options(testthat_interactive = FALSE) expect_equal(test_path("path"), "path") }) test_that("returns local path during package loading", { withr::local_envvar(TESTTHAT = "false", DEVTOOLS_LOAD = "testthat") expect_equal(test_path("path"), "path") }) test_that("returns full path when called interactively", { withr::local_envvar("TESTTHAT" = "false") pkg <- withr::local_tempdir() 
dir.create(file.path(pkg, "tests", "testthat"), recursive = TRUE) withr::local_dir(pkg) expect_equal(test_path("path"), "tests/testthat/path") }) test_that("throws error if can't find tests/testthat", { withr::local_envvar("TESTTHAT" = "false") withr::local_dir(withr::local_tempdir()) local_edition(3) local_reproducible_output() expect_snapshot(test_path("empty"), error = TRUE) }) testthat/tests/testthat/test-expect-match.R0000644000176200001440000000521215072252215020577 0ustar liggesuserstest_that("useful failure if empty", { zero <- character(0) expect_snapshot_failure(expect_match(zero, 'asdf')) }) test_that("useful failure messages for scalars", { local_reproducible_output(unicode = TRUE) one <- "bcde" expect_snapshot_failure(expect_match(one, 'asdf')) expect_snapshot_failure(expect_match(one, 'asdf', fixed = TRUE)) }) test_that("useful failure messages for vectors", { local_reproducible_output(unicode = TRUE) many <- c("a", "a", "b") expect_snapshot_failure(expect_match(many, "a")) expect_snapshot_failure(expect_match(many, "c", all = FALSE)) paragraph <- c("This is a multiline\nparagraph.", "Second element.") expect_snapshot_failure(expect_match(paragraph, "paragraph")) na <- c("NA", NA) expect_snapshot_failure(expect_match(na, "NA")) }) test_that("expect_match validates its inputs", { expect_snapshot(error = TRUE, { expect_match(1) expect_match("x", 1) expect_match("x", "x", fixed = 1) expect_match("x", "x", perl = 1) expect_match("x", "x", all = 1) }) }) test_that("expect_no_match validates its inputs", { expect_snapshot(error = TRUE, { expect_no_match(1, "x") expect_no_match("x", 1) expect_no_match("x", "x", fixed = 1) expect_no_match("x", "x", perl = 1) expect_no_match("x", "x", all = 1) }) }) test_that("extra arguments passed onto grepl", { expect_failure(expect_match("\\s", "\\s")) expect_success(expect_match("\\s", "\\s", fixed = TRUE)) expect_failure(expect_match("test", "TEST")) expect_success(expect_match("test", "TEST", ignore.case = TRUE)) }) 
test_that("expect_no_match works", { expect_success(expect_no_match("[a]", "[b]")) expect_success(expect_no_match("[a]", "[b]", fixed = TRUE)) x <- "te*st" expect_snapshot_failure(expect_no_match(x, "e*", fixed = TRUE)) x <- "test" expect_snapshot_failure(expect_no_match(x, "TEST", ignore.case = TRUE)) }) test_that("empty string is never a match", { expect_success(expect_no_match(character(), "x")) }) # show_text() ------------------------------------------------------------------ test_that("show_text() shows success and failure", { local_reproducible_output(unicode = TRUE) expect_snapshot({ base::writeLines(show_text(c("a", "b"), c(TRUE, FALSE))) }) }) test_that("show_text() truncates values and lines", { local_reproducible_output(unicode = TRUE) lines <- map_chr( split(letters, (seq_along(letters) - 1) %/% 3), paste, collapse = "\n" ) expect_snapshot({ base::writeLines(show_text(lines, max_lines = 3)) base::writeLines(show_text(lines, max_items = 3)) base::writeLines(show_text(lines, max_items = 2, max_lines = 4)) }) }) testthat/tests/testthat/test-mock2-helpers.R0000644000176200001440000000174615047715224020706 0ustar liggesuserstest_that("mock_output_sequence() works", { mocked_sequence <- mock_output_sequence("3", "This is a note", "n") expect_equal(mocked_sequence(), "3") expect_equal(mocked_sequence(), "This is a note") expect_equal(mocked_sequence(), "n") expect_snapshot(mocked_sequence(), error = TRUE) }) test_that("mock_output_sequence() works -- list", { x <- list("3", "This is a note", "n") mocked_sequence <- mock_output_sequence(!!!x) expect_equal(mocked_sequence(), "3") expect_equal(mocked_sequence(), "This is a note") expect_equal(mocked_sequence(), "n") }) test_that("mock_output_sequence()'s recycling works", { mocked_sequence <- mock_output_sequence( "3", "This is a note", "n", recycle = TRUE ) expect_equal(mocked_sequence(), "3") expect_equal(mocked_sequence(), "This is a note") expect_equal(mocked_sequence(), "n") expect_equal(mocked_sequence(), 
"3") expect_equal(mocked_sequence(), "This is a note") expect_equal(mocked_sequence(), "n") }) testthat/tests/testthat/test-reporter-multi.R0000644000176200001440000000055514164710003021207 0ustar liggesuserstest_that("MultiReporter", { reports <- lapply(seq_len(3), function(x) ListReporter$new()) reporter <- MultiReporter$new(reporters = reports) with_reporter(reporter, test_one_file("context.R")) dfs <- lapply(reports, function(x) as.data.frame(x$get_results())) expect_equal(dfs[[2]][1:7], dfs[[1]][1:7]) expect_equal(dfs[[3]][1:7], dfs[[1]][1:7]) }) testthat/tests/testthat/test-reporter-llm.R0000644000176200001440000000157415127561732020660 0ustar liggesuserstest_that("reports issues immediately but not successes", { expect_snapshot_reporter( LlmReporter$new(), test_path("reporters/tests.R") ) }) test_that("reports only summary for all successes", { expect_snapshot_reporter( LlmReporter$new(), test_path("reporters/successes.R") ) }) test_that("fails after max_fail tests", { withr::local_options(testthat.progress.max_fails = 3) expect_snapshot_reporter( LlmReporter$new(), test_path(c("reporters/fail-many.R", "reporters/fail.R")) ) }) test_that("is_llm() detects known LLM agent environment variables", { withr::local_envvar( AGENT = NA, CLAUDECODE = NA, GEMINI_CLI = NA, CURSOR_AGENT = NA ) expect_false(is_llm()) local({ withr::local_envvar(AGENT = "1") expect_true(is_llm()) }) local({ withr::local_envvar(CLAUDECODE = "1") expect_true(is_llm()) }) }) testthat/tests/testthat/test-reporter-progress.R0000644000176200001440000000517615104634254021735 0ustar liggesuserstest_that("captures error before first test", { local_output_override() expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/error-setup.R") ) }) test_that("gracefully handles multiple contexts", { expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/context.R") ) }) test_that("can control max fails 
with env var or option", { withr::local_envvar(TESTTHAT_MAX_FAILS = 11) expect_equal(testthat_max_fails(), 11) withr::local_options(testthat.progress.max_fails = 12) expect_equal(testthat_max_fails(), 12) }) test_that("fails after max_fail tests", { withr::local_options(testthat.progress.max_fails = 10) expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path(c("reporters/fail-many.R", "reporters/fail.R")) ) }) test_that("can fully suppress incremental updates", { expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/successes.R") ) expect_snapshot_reporter( ProgressReporter$new(update_interval = Inf, min_time = Inf), test_path("reporters/successes.R") ) }) test_that("reports backtraces", { withr::local_envvar(TESTTHAT_MAX_FAILS = Inf) expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/backtraces.R") ) }) test_that("records skips", { expect_snapshot_reporter( ProgressReporter$new(update_interval = 0, min_time = Inf), test_path("reporters/skips.R") ) }) # compact display --------------------------------------------------------- test_that("compact display is informative", { expect_snapshot_reporter( CompactProgressReporter$new(), test_path("reporters/tests.R") ) }) test_that("display of successes only is compact", { expect_snapshot_reporter( CompactProgressReporter$new(), test_path("reporters/successes.R") ) expect_snapshot_reporter( CompactProgressReporter$new(), test_path("reporters/skips.R") ) # And even more compact if in RStudio pane local_reproducible_output(rstudio = TRUE) expect_snapshot_reporter( CompactProgressReporter$new(), test_path("reporters/successes.R") ) }) # parallel progress reporter ---------------------------------------------- test_that("ParallelProgressReporter fails after max_fail tests", { withr::local_options(testthat.progress.max_fails = 10) expect_snapshot_reporter( 
ParallelProgressReporter$new(update_interval = 0, min_time = Inf), test_path(c("reporters/fail-many.R", "reporters/fail.R")) ) }) testthat/tests/testthat/test-expect-vector.R0000644000176200001440000000065315127531132021010 0ustar liggesuserstest_that("basic properties upheld", { skip_if_not_installed("vctrs", "0.1.0.9002") expect_success(expect_vector(1:10, size = 10)) x <- 1:10 expect_snapshot(error = TRUE, { expect_vector(x, size = 5) }) y <- NULL expect_snapshot(error = TRUE, { expect_vector(y) }) }) test_that("expect_vector validates its inputs", { expect_snapshot(error = TRUE, { expect_vector(1:5, size = "large") }) }) testthat/tests/testthat/test-helpers.R0000644000176200001440000000013714313316267017665 0ustar liggesusers# See helper-assign.R test_that("helpers run before tests", { expect_equal(abcdefghi, 10) }) testthat/tests/testthat/too-many-failures.R0000644000176200001440000000044314313316267020621 0ustar liggesuserstest_that("SummaryReport gives up if too many errors", { expect_equal(Inf, 1) expect_equal(Inf, 2) expect_equal(Inf, 3) expect_equal(Inf, 4) expect_equal(Inf, 5) expect_equal(Inf, 6) expect_equal(Inf, 7) expect_equal(Inf, 8) expect_equal(Inf, 9) expect_equal(Inf, 10) }) testthat/tests/testthat/extract/0000755000176200001440000000000015104635341016570 5ustar liggesuserstestthat/tests/testthat/extract/simple.R0000644000176200001440000000005215104635341020201 0ustar liggesuserstest_that('foo', { expect_true(TRUE) }) testthat/tests/testthat/test-verify-output.R0000644000176200001440000000473515047715224021076 0ustar liggesuserstest_that("can record all types of output", { verify_output(test_path("test-verify-output.txt"), { "Output" 1 + 2 invisible(1:10) 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 "# Header" "Other output" letters }) }) test_that("can record all types of output", { local_bindings( .env = global_env(), conditionMessage.foobar = function(cnd) { 
paste("Dispatched!", cnd$message) } ) verify_output(test_path("test-verify-conditions.txt"), { message("Message") "With calls" warning("Warning") stop("Error") "Without calls" warning("Warning", call. = FALSE) stop("Error", call. = FALSE) "With `conditionMessage()` method" cnd_signal(message_cnd("foobar", message = "Message")) cnd_signal(warning_cnd("foobar", message = "Warning")) cnd_signal(error_cnd("foobar", message = "Error")) }) }) test_that("can't record plots", { skip_if(interactive()) expect_snapshot(error = TRUE, verify_output(tempfile(), plot(1:10))) }) test_that("verify_output() splits condition messages on newlines", { verify_output(test_path("test-verify-conditions-lines.txt"), { message("First.\nSecond.") warning("First.\nSecond.") stop("First.\nSecond.") }) }) test_that("can use constructed calls in verify_output() (#945)", { verify_output(test_path("test-verify-constructed-calls.txt"), { expr(foo(!!c("bar", "baz"))) # Can unquote local objects binding <- quote(foo) expr(foo(!!binding)) }) }) test_that("verify_output() doesn't use cli unicode by default", { verify_output( test_path("test-verify-unicode-false.txt"), { cat(cli::symbol$info, cli::symbol$cross, "\n") } ) local_reproducible_output(unicode = TRUE) verify_output( test_path("test-verify-unicode-true.txt"), unicode = TRUE, { cat(cli::symbol$info, cli::symbol$cross, "\n") } ) }) test_that("verify_output() handles carriage return", { verify_output(test_path("test-verify-conditions-cr.txt"), { cat("\r\n") }) }) test_that("verify_exec() doesn't leave tempfiles around", { before <- dir(tempdir()) verify_exec(quote(1 + 1)) after <- dir(tempdir()) expect_equal(before, after) }) test_that("verify_exec() strips CR", { act <- verify_exec(quote(cat("\r\n"))) exp <- verify_exec(quote(cat("\n"))) expect_equal(act[-1], exp[-1]) }) testthat/tests/testthat/test-path-present/0000755000176200001440000000000013701151360020500 5ustar 
liggesuserstestthat/tests/testthat/test-path-present/tests/0000755000176200001440000000000013701151360021642 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/testthat/0000755000176200001440000000000013701151360023502 5ustar liggesuserstestthat/tests/testthat/test-path-present/tests/testthat/empty0000644000176200001440000000000013701151360024551 0ustar liggesuserstestthat/tests/testthat/utf8.R0000644000176200001440000000011714313316267016132 0ustar liggesuserstest_that("sourced with correct encoding", { expect_equal("ä", "\u00e4") }) testthat/tests/testthat/test-mock.R0000644000176200001440000000020315054053615017144 0ustar liggesuserstest_that("now defunct", { expect_snapshot(error = TRUE, { local_mock() with_mock(is_testing = function() FALSE) }) }) testthat/tests/testthat/test-skip.R0000644000176200001440000001233015054412736017170 0ustar liggesuserstest_that("basic skips work as expected", { expect_snapshot_skip(skip()) expect_no_skip(skip_if(FALSE)) expect_snapshot_skip(skip_if(TRUE)) expect_no_skip(skip_if_not(TRUE)) expect_snapshot_skip(skip_if_not(FALSE)) expect_snapshot_skip(skip_empty()) }) test_that("autogenerated message is always single line", { a_very_long_argument_name <- FALSE cnd <- capture_condition(skip_if_not( a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name )) expect_length(cnd$message, 1) # ensure the message is not repeated, #1290 expect_snapshot_output(cat(cnd$message)) }) # skip helpers ------------------------------------------------------------ test_that("skip_if_not_installed() works as expected, absence and version", { local_mocked_bindings(package_version = function(x) 
"3.0.0") expect_snapshot_skip(skip_if_not_installed("doesntexist")) expect_snapshot_skip(skip_if_not_installed("testthat", "9999.9999.999")) expect_no_skip(skip_if_not_installed("testthat", "1.0.0")) }) test_that("skip_if_not_installed() works as expected, offline", { skip_if_not_installed("curl") skip_on_cran() expect_no_skip(skip_if_offline()) local_mocked_bindings(has_internet = function(host) FALSE) expect_snapshot_skip(skip_if_offline()) }) test_that("skip_on_cran() works as expected", { local({ local_on_cran(FALSE) expect_no_skip(skip_on_cran()) }) local({ local_on_cran(TRUE) expect_skip(skip_on_cran()) }) withr::local_envvar(NOT_CRAN = NA) local_mocked_bindings(interactive = function() TRUE) expect_no_skip(skip_on_cran()) local_mocked_bindings(interactive = function() FALSE) expect_skip(skip_on_cran()) }) test_that("local_on_cran sets NOT_CRAN", { local({ local_on_cran(TRUE) expect_equal(on_cran(), TRUE) expect_equal(Sys.getenv("NOT_CRAN"), "false") }) local({ local_on_cran(FALSE) expect_equal(on_cran(), FALSE) expect_equal(Sys.getenv("NOT_CRAN"), "true") }) }) test_that("local_assume_not_on_cran() sets NOT_CRAN if not already set", { withr::local_envvar(NOT_CRAN = NA) local({ local_assume_not_on_cran() expect_equal(Sys.getenv("NOT_CRAN"), "true") }) withr::local_envvar(NOT_CRAN = "false") local({ local_assume_not_on_cran() expect_equal(Sys.getenv("NOT_CRAN"), "false") }) }) test_that("skip_on_ci() works as expected", { withr::local_envvar(CI = "false") expect_no_skip(skip_on_ci()) withr::local_envvar(CI = "true") expect_snapshot_skip(skip_on_ci()) }) test_that("skip_on_covr() works as expected", { withr::local_envvar(R_COVR = "false") expect_no_skip(skip_on_covr()) withr::local_envvar(R_COVR = "true") expect_snapshot_skip(skip_on_covr()) }) test_that("skip_on_bioc() works as expected", { expect_no_skip(skip_on_bioc()) withr::local_envvar(IS_BIOC_BUILD_MACHINE = "true") expect_snapshot_skip(skip_on_bioc()) }) test_that("superseded CI skips still work", { 
expect_no_skip(skip_on_travis()) expect_no_skip(skip_on_appveyor()) withr::local_envvar(TRAVIS = "true", APPVEYOR = "true") expect_snapshot_skip(skip_on_travis()) expect_snapshot_skip(skip_on_appveyor()) }) test_that("skip_if_translated() works as expected", { local_mocked_bindings(gettext = function(msg, ...) msg) expect_no_skip(skip_if_translated()) local_mocked_bindings(gettext = function(msg, ...) toupper(msg)) expect_snapshot_skip(skip_if_translated()) }) # skip_on_os() ------------------------------------------------------------ test_that("skip on os checks os names", { expect_snapshot(skip_on_os("amiga"), error = TRUE) }) test_that("can skip on multiple oses", { local_mocked_bindings(system_os = function() "windows") expect_snapshot_skip(skip_on_os("windows")) expect_snapshot_skip(skip_on_os(c("windows", "linux"))) expect_no_skip(skip_on_os("linux")) expect_no_skip(skip_on_os("mac")) expect_no_skip(skip_on_os("emscripten")) }) test_that("can refine os with arch", { local_mocked_bindings( system_os = function() "windows", system_arch = function() "i386" ) expect_snapshot_skip(skip_on_os("windows")) expect_snapshot_skip(skip_on_os("windows", "i386")) expect_no_skip(skip_on_os("windows", "x86_64")) expect_no_skip(skip_on_os("linux", "i386")) }) test_that("skip_unless_r works as expected", { expect_no_skip(skip_unless_r(">= 0.0.0")) expect_no_skip(skip_unless_r(paste("==", getRversion()))) expect_no_skip(skip_unless_r("<= 999.999.999")) expect_skip(skip_unless_r(">= 999.999.999")) expect_skip(skip_unless_r("== 0.0.0")) expect_skip(skip_unless_r("<= 0.0.0")) expect_snapshot(error = TRUE, skip_unless_r("idfjdij")) }) test_that("skip_unless_r gives the expected output", { local_mocked_bindings(getRversion = \() numeric_version("4.5.0")) expect_snapshot_skip(skip_unless_r(">= 999.999.999")) expect_snapshot_skip(skip_unless_r("== 0.0.0")) }) testthat/tests/testthat/test-parallel/0000755000176200001440000000000015130664352017672 5ustar 
liggesuserstestthat/tests/testthat/test-parallel/fail/0000755000176200001440000000000015040747541020607 5ustar liggesuserstestthat/tests/testthat/test-parallel/fail/tests/0000755000176200001440000000000015040747541021751 5ustar liggesuserstestthat/tests/testthat/test-parallel/fail/tests/testthat/0000755000176200001440000000000015130664352023607 5ustar liggesuserstestthat/tests/testthat/test-parallel/fail/tests/testthat/test-bad.R0000644000176200001440000000006015047715224025433 0ustar liggesuserstest_that("bad test", { expect_true(FALSE) }) testthat/tests/testthat/test-parallel/fail/tests/testthat.R0000644000176200001440000000006015040747541023730 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/fail/NAMESPACE0000644000176200001440000000005615040747541022027 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/fail/DESCRIPTION0000644000176200001440000000110415040747541022311 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). 
License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/startup/0000755000176200001440000000000014313316272021371 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/tests/0000755000176200001440000000000014313316272022533 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/tests/testthat/0000755000176200001440000000000015130664352024376 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/tests/testthat/test-startup-1.R0000644000176200001440000000007014313316272027330 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/startup/tests/testthat/test-empty.R0000644000176200001440000000007615127561732026643 0ustar liggesusers# Empty test (used to ensure parallel execution is performed) testthat/tests/testthat/test-parallel/startup/tests/testthat.R0000644000176200001440000000006014313316272024512 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/startup/R/0000755000176200001440000000000015047715224021577 5ustar liggesuserstestthat/tests/testthat/test-parallel/startup/R/fail.R0000644000176200001440000000015315047715224022634 0ustar liggesusers.onLoad <- function(libname, pkgname) { stop("This will fail when loading the package", call. 
= FALSE) } testthat/tests/testthat/test-parallel/startup/NAMESPACE0000644000176200001440000000005614313316272022611 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/startup/DESCRIPTION0000644000176200001440000000110414313316272023073 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/ok/0000755000176200001440000000000014313316272020300 5ustar liggesuserstestthat/tests/testthat/test-parallel/ok/tests/0000755000176200001440000000000014313316272021442 5ustar liggesuserstestthat/tests/testthat/test-parallel/ok/tests/testthat/0000755000176200001440000000000015130664352023305 5ustar liggesuserstestthat/tests/testthat/test-parallel/ok/tests/testthat/test-ok-3.R0000644000176200001440000000010414313316272025146 0ustar liggesuserstest_that("this skips", { skip(paste("This is", Sys.getpid())) }) testthat/tests/testthat/test-parallel/ok/tests/testthat/test-ok-1.R0000644000176200001440000000007014313316272025146 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/ok/tests/testthat/test-ok-2.R0000644000176200001440000000007614313316272025155 0ustar liggesuserstest_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/ok/tests/testthat.R0000644000176200001440000000006014313316272023421 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") 
testthat/tests/testthat/test-parallel/ok/NAMESPACE0000644000176200001440000000005614313316272021520 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/ok/DESCRIPTION0000644000176200001440000000110414313316272022002 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/stdout/0000755000176200001440000000000015047715224021216 5ustar liggesuserstestthat/tests/testthat/test-parallel/stdout/tests/0000755000176200001440000000000015047715224022360 5ustar liggesuserstestthat/tests/testthat/test-parallel/stdout/tests/testthat/0000755000176200001440000000000015130664352024216 5ustar liggesuserstestthat/tests/testthat/test-parallel/stdout/tests/testthat/test-stdout-2.R0000644000176200001440000000012415047715224026776 0ustar liggesuserstest_that("this messages", { message("This is a message!") expect_true(TRUE) }) testthat/tests/testthat/test-parallel/stdout/tests/testthat/test-stdout-1.R0000644000176200001440000000007015047715224026775 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/stdout/tests/testthat/test-stdout-3.R0000644000176200001440000000013515047715224027001 0ustar liggesuserstest_that("this prints and skips", { print(1:30) skip(paste("This is", Sys.getpid())) }) testthat/tests/testthat/test-parallel/stdout/tests/testthat.R0000644000176200001440000000006015047715224024337 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") 
testthat/tests/testthat/test-parallel/stdout/NAMESPACE0000644000176200001440000000005615047715224022436 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/stdout/DESCRIPTION0000644000176200001440000000110715047715224022723 0ustar liggesusersPackage: setup Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/crash/0000755000176200001440000000000014505026507020771 5ustar liggesuserstestthat/tests/testthat/test-parallel/crash/tests/0000755000176200001440000000000014313316272022131 5ustar liggesuserstestthat/tests/testthat/test-parallel/crash/tests/testthat/0000755000176200001440000000000015130664352023774 5ustar liggesuserstestthat/tests/testthat/test-parallel/crash/tests/testthat/test-crash-2.R0000644000176200001440000000007614313316272026333 0ustar liggesuserstest_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/crash/tests/testthat/test-crash-3.R0000644000176200001440000000013515047715224026335 0ustar liggesuserstest_that("this crashes", { expect_true(TRUE) expect_true(FALSE) rlang::node_car(0) }) testthat/tests/testthat/test-parallel/crash/tests/testthat/test-crash-1.R0000644000176200001440000000007014313316272026324 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/crash/tests/testthat.R0000644000176200001440000000006014313316272024110 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") 
testthat/tests/testthat/test-parallel/crash/NAMESPACE0000644000176200001440000000005614313316272022207 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/crash/DESCRIPTION0000644000176200001440000000110414313316272022471 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/outside/0000755000176200001440000000000014313316272021343 5ustar liggesuserstestthat/tests/testthat/test-parallel/outside/tests/0000755000176200001440000000000014313316272022505 5ustar liggesuserstestthat/tests/testthat/test-parallel/outside/tests/testthat/0000755000176200001440000000000015130664352024350 5ustar liggesuserstestthat/tests/testthat/test-parallel/outside/tests/testthat/test-outside-3.R0000644000176200001440000000010414313316272027254 0ustar liggesuserstest_that("this skips", { skip(paste("This is", Sys.getpid())) }) testthat/tests/testthat/test-parallel/outside/tests/testthat/test-outside-2.R0000644000176200001440000000014415047715224027264 0ustar liggesusersstop("Error outside of test_that()") test_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/outside/tests/testthat/test-outside-1.R0000644000176200001440000000007014313316272027254 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/outside/tests/testthat.R0000644000176200001440000000006014313316272024464 0ustar liggesuserslibrary(testthat) library(ok) 
test_check("ok") testthat/tests/testthat/test-parallel/outside/NAMESPACE0000644000176200001440000000005614313316272022563 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/outside/DESCRIPTION0000644000176200001440000000111114313316272023043 0ustar liggesusersPackage: outside Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/setup/0000755000176200001440000000000014313316272021027 5ustar liggesuserstestthat/tests/testthat/test-parallel/setup/tests/0000755000176200001440000000000014313316272022171 5ustar liggesuserstestthat/tests/testthat/test-parallel/setup/tests/testthat/0000755000176200001440000000000015130664352024034 5ustar liggesuserstestthat/tests/testthat/test-parallel/setup/tests/testthat/test-setup-1.R0000644000176200001440000000007014313316272026424 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/setup/tests/testthat/test-setup-2.R0000644000176200001440000000007614313316272026433 0ustar liggesuserstest_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/setup/tests/testthat/setup-bad.R0000644000176200001440000000002715047715224026044 0ustar liggesusersstop("Error in setup") testthat/tests/testthat/test-parallel/setup/tests/testthat/test-setup-3.R0000644000176200001440000000010414313316272026424 0ustar liggesuserstest_that("this skips", { skip(paste("This is", Sys.getpid())) }) 
testthat/tests/testthat/test-parallel/setup/tests/testthat.R0000644000176200001440000000006014313316272024150 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/setup/NAMESPACE0000644000176200001440000000005614313316272022247 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/setup/DESCRIPTION0000644000176200001440000000110714313316272022534 0ustar liggesusersPackage: setup Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/teardown/0000755000176200001440000000000014313316272021512 5ustar liggesuserstestthat/tests/testthat/test-parallel/teardown/tests/0000755000176200001440000000000014313316272022654 5ustar liggesuserstestthat/tests/testthat/test-parallel/teardown/tests/testthat/0000755000176200001440000000000015130664352024517 5ustar liggesuserstestthat/tests/testthat/test-parallel/teardown/tests/testthat/teardown-bad.R0000644000176200001440000000003215047715224027206 0ustar liggesusersstop("Error in teardown") testthat/tests/testthat/test-parallel/teardown/tests/testthat/test-teardown-1.R0000644000176200001440000000007014313316272027572 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) testthat/tests/testthat/test-parallel/teardown/tests/testthat/test-empty.R0000644000176200001440000000007615127561732026764 0ustar liggesusers# Empty test (used to ensure parallel execution is performed) 
testthat/tests/testthat/test-parallel/teardown/tests/testthat.R0000644000176200001440000000006014313316272024633 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/teardown/NAMESPACE0000644000176200001440000000005614313316272022732 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/teardown/DESCRIPTION0000644000176200001440000000111214313316272023213 0ustar liggesusersPackage: teardown Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/syntax-error/0000755000176200001440000000000015047715224022351 5ustar liggesuserstestthat/tests/testthat/test-parallel/syntax-error/tests/0000755000176200001440000000000015047715224023513 5ustar liggesuserstestthat/tests/testthat/test-parallel/syntax-error/tests/testthat/0000755000176200001440000000000015130664352025351 5ustar liggesuserstestthat/tests/testthat/test-parallel/syntax-error/tests/testthat/test-error-2.R0000644000176200001440000000007615047715224027746 0ustar liggesuserstest_that("this fails", { expect_equal(Sys.getpid(), 0L) }) testthat/tests/testthat/test-parallel/syntax-error/tests/testthat/test-error-1.R0000644000176200001440000000012515047715224027740 0ustar liggesuserstest_that("this is good", { expect_equal(2 * 2, 4) }) but this is a syntax error! 
testthat/tests/testthat/test-parallel/syntax-error/tests/testthat.R0000644000176200001440000000006015047715224025472 0ustar liggesuserslibrary(testthat) library(ok) test_check("ok") testthat/tests/testthat/test-parallel/syntax-error/NAMESPACE0000644000176200001440000000005615047715224023571 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/syntax-error/DESCRIPTION0000644000176200001440000000110715047715224024056 0ustar liggesusersPackage: setup Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-parallel/snap/0000755000176200001440000000000015040747541020635 5ustar liggesuserstestthat/tests/testthat/test-parallel/snap/tests/0000755000176200001440000000000015040747541021777 5ustar liggesuserstestthat/tests/testthat/test-parallel/snap/tests/testthat/0000755000176200001440000000000015047715224023637 5ustar liggesuserstestthat/tests/testthat/test-parallel/snap/tests/testthat/test-snap-2.R0000644000176200001440000000006415047715224026037 0ustar liggesuserstest_that("snapshot", { expect_snapshot(11:20) }) testthat/tests/testthat/test-parallel/snap/tests/testthat/test-snap-1.R0000644000176200001440000000006315047715224026035 0ustar liggesuserstest_that("snapshot", { expect_snapshot(1:10) }) testthat/tests/testthat/test-parallel/snap/tests/testthat/test-snap-3.R0000644000176200001440000000006415047715224026040 0ustar liggesuserstest_that("snapshot", { expect_snapshot(21:30) }) 
testthat/tests/testthat/test-parallel/snap/tests/testthat/_snaps/0000755000176200001440000000000015040747541025122 5ustar liggesuserstestthat/tests/testthat/test-parallel/snap/tests/testthat/_snaps/snap-3.md0000644000176200001440000000012615040747541026544 0ustar liggesusers# snapshot Code 11:20 Output [1] 11 12 13 14 15 16 17 18 19 20 testthat/tests/testthat/test-parallel/snap/tests/testthat/_snaps/snap-2.md0000644000176200001440000000012615040747541026543 0ustar liggesusers# snapshot Code 11:20 Output [1] 11 12 13 14 15 16 17 18 19 20 testthat/tests/testthat/test-parallel/snap/tests/testthat/_snaps/snap-1.md0000644000176200001440000000012515040747541026541 0ustar liggesusers# snapshot Code 1:10 Output [1] 1 2 3 4 5 6 7 8 9 10 testthat/tests/testthat/test-parallel/snap/NAMESPACE0000644000176200001440000000005615040747541022055 0ustar liggesusers# Generated by roxygen2: do not edit by hand testthat/tests/testthat/test-parallel/snap/DESCRIPTION0000644000176200001440000000110415040747541022337 0ustar liggesusersPackage: ok Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person(given = "First", family = "Last", role = c("aut", "cre"), email = "first.last@example.com", comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). 
License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 LazyData: true Roxygen: list(markdown = TRUE) RoxygenNote: 7.1.1 Suggests: testthat Config/testthat/parallel: true Config/testthat/edition: 3 testthat/tests/testthat/test-verify-constructed-calls.txt0000644000176200001440000000015615127731052023572 0ustar liggesusers> expr(foo(!!c("bar", "baz"))) foo(c("bar", "baz")) > binding <- quote(foo) > expr(foo(!!binding)) foo(foo) testthat/tests/testthat/test_dir/0000755000176200001440000000000015130237654016737 5ustar liggesuserstestthat/tests/testthat/test_dir/test-bare-expectations.R0000644000176200001440000000002314313316267023447 0ustar liggesusersexpect_equal(2, 2) testthat/tests/testthat/test_dir/test-helper.R0000644000176200001440000000020414313316267021312 0ustar liggesusers# test that the companion helper script is sourced by test_dir test_that("helper test", { expect_equal(hello(), "Hello World") }) testthat/tests/testthat/test_dir/test-skip.R0000644000176200001440000000013614313316267021005 0ustar liggesuserstest_that("Skips skip", { skip("Skipping to avoid certain failure") expect_true(FALSE) }) testthat/tests/testthat/test_dir/test-errors.R0000644000176200001440000000051014313316267021347 0ustar liggesuserstest_that("simple", { stop("argh") }) test_that("after one success", { expect_true(TRUE) stop("argh") expect_true(TRUE) }) test_that("after one failure", { expect_true(FALSE) stop("argh") }) test_that("in the test", { expect_true(stop("Argh")) }) test_that("in expect_error", { expect_error(stop("Argh")) }) testthat/tests/testthat/test_dir/helper_hello.R0000644000176200001440000000004214313316267021520 0ustar liggesusershello <- function() "Hello World" testthat/tests/testthat/test_dir/test-basic.R0000644000176200001440000000066714313316267021131 0ustar liggesuserstest_that("logical tests act as expected", { expect_true(TRUE) expect_false(FALSE) }) test_that("logical tests ignore attributes", { expect_true(c(a = 
TRUE)) expect_false(c(a = FALSE)) }) test_that("equality holds", { expect_equal(5, 5) expect_identical(10, 10) }) test_that("can't access variables from other tests 2", { a <- 10 }) test_that("can't access variables from other tests 1", { expect_false(exists("a")) }) testthat/tests/testthat/test_dir/test-failures.R0000644000176200001440000000032114313316267021645 0ustar liggesuserstest_that("just one failure", { expect_true(FALSE) }) test_that("one failure on two", { expect_false(FALSE) expect_true(FALSE) }) test_that("no failure", { expect_false(FALSE) expect_true(TRUE) }) testthat/tests/testthat/test_dir/test-empty.R0000644000176200001440000000012014313316267021166 0ustar liggesuserstest_that("empty test", NULL) test_that("empty test with error", stop("Argh")) testthat/tests/testthat/test-snapshot-manage.R0000644000176200001440000000662215054053615021313 0ustar liggesuserstest_that("informs about files being accepted", { path <- local_snapshot_dir(c( "a.md", "a.new.md", "test/b.txt", "test/b.new.txt" )) expect_snapshot(snapshot_accept(path = path)) expect_equal( dir(file.path(path, "_snaps"), recursive = TRUE), c("a.md", "test/b.txt") ) }) test_that("useful mesasge if no files to accept", { path <- local_snapshot_dir(character()) expect_snapshot(snapshot_accept(path = path)) }) test_that("can accept files created by expect_snapshot()", { # without extension path <- local_snapshot_dir(c("a.md", "a.new.md", "b.md", "b.new.md")) suppressMessages(snapshot_accept("a", path = path)) expect_equal(dir(file.path(path, "_snaps")), c("a.md", "b.md", "b.new.md")) # with extension path <- local_snapshot_dir(c("a.md", "a.new.md", "b.md", "b.new.md")) suppressMessages(snapshot_accept("a.md", path = path)) expect_equal(dir(file.path(path, "_snaps")), c("a.md", "b.md", "b.new.md")) # or whole directory path <- local_snapshot_dir(c("a.md", "a.new.md", "b.md", "b.new.md")) suppressMessages(snapshot_accept(path = path)) expect_equal(dir(file.path(path, "_snaps")), c("a.md", "b.md")) 
}) test_that("can accept files created by dotted tests in name", { # e.g. test-data.frame.R will create _snaps/data.frame.md # without extension path <- local_snapshot_dir(c("data.frame.md", "data.frame.new.md")) suppressMessages(snapshot_accept("data.frame", path = path)) expect_equal(dir(file.path(path, "_snaps")), "data.frame.md") # with extension path <- local_snapshot_dir(c("data.frame.md", "data.frame.new.md")) suppressMessages(snapshot_accept("data.frame.md", path = path)) expect_equal(dir(file.path(path, "_snaps")), "data.frame.md") }) test_that("can accept files created by expect_snapshot_file()", { path <- local_snapshot_dir(c("test/a.txt", "test/a.new.txt")) suppressMessages(snapshot_accept("test/a.txt", path = path)) expect_equal(dir(file.path(path, "_snaps"), recursive = TRUE), "test/a.txt") # including markdown files path <- local_snapshot_dir(c("test/a.md", "test/a.new.md")) suppressMessages(snapshot_accept("test/", path = path)) expect_equal(dir(file.path(path, "_snaps"), recursive = TRUE), "test/a.md") # or the whole directory path <- local_snapshot_dir(c( "test/a.md", "test/a.new.md", "test/b.txt", "test/b.new.txt" )) suppressMessages(snapshot_accept("test/", path = path)) expect_equal( dir(file.path(path, "_snaps"), recursive = TRUE), c("test/a.md", "test/b.txt") ) }) test_that("can work with variants", { # Can accept all path <- local_snapshot_dir(c("foo/a.md", "foo/a.new.md")) expect_snapshot(snapshot_accept(path = path)) expect_equal(dir(file.path(path, "_snaps", "foo")), "a.md") # Can accept specified path <- local_snapshot_dir(c("foo/a.md", "foo/a.new.md")) expect_snapshot(snapshot_accept("foo/a", path = path)) expect_equal(dir(file.path(path, "_snaps", "foo")), "a.md") }) test_that("snapshot_reject deletes .new files", { path <- local_snapshot_dir(c("a.md", "a.new.md", "b.md", "b.new.md")) expect_snapshot(snapshot_reject(path = path)) expect_equal(dir(file.path(path, "_snaps")), c("a.md", "b.md")) }) # snapshot_meta 
----------------------------------------------------------- test_that("returns empty data frame for empty directory", { path <- tempfile() dir.create(path) expect_equal(nrow(snapshot_meta(path = path)), 0) }) testthat/tests/testthat/test-test-state.R0000644000176200001440000000062115047715224020317 0ustar liggesuserstest_that("set_state_inspector() verifies its inputs", { expect_snapshot(set_state_inspector(function(x) 123), error = TRUE) }) test_that("can detect state changes", { local_options(x = NULL) set_state_inspector(function() list(x = getOption("x"))) withr::defer(set_state_inspector(NULL)) expect_snapshot_reporter( CheckReporter$new(), test_path("reporters/state-change.R") ) }) testthat/tests/testthat/test-test-files.R0000644000176200001440000000621515053651614020305 0ustar liggesusers# test_dir() -------------------------------------------------------------- test_that("stops on failure", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") expect_snapshot(error = TRUE, { test_dir(test_path("test_dir"), reporter = "silent") }) }) test_that("runs all tests and records output", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") res <- test_dir( test_path("test_dir"), reporter = "silent", stop_on_failure = FALSE ) df <- as.data.frame(res) df$user <- df$system <- df$real <- df$result <- NULL local_reproducible_output(width = 200) local_edition(3) # set to 2 in ./test_dir expect_snapshot_output(print(df)) }) test_that("complains if no files", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") path <- withr::local_tempfile() dir.create(path) expect_snapshot(error = TRUE, test_dir(path)) }) test_that("can control if failures generate errors", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") test_error <- function(...) { test_dir(test_path("test-error"), reporter = "silent", ...) 
} expect_snapshot(error = TRUE, test_error(stop_on_failure = TRUE)) expect_no_error(test_error(stop_on_failure = FALSE)) }) test_that("can control if warnings errors", { withr::local_envvar(TESTTHAT_PARALLEL = "FALSE") test_warning <- function(...) { test_dir(test_path("test-warning"), reporter = "silent", ...) } expect_snapshot(error = TRUE, test_warning(stop_on_warning = TRUE)) expect_no_error(test_warning(stop_on_warning = FALSE)) }) # test_file --------------------------------------------------------------- test_that("can test single file", { out <- test_file(test_path("test_dir/test-basic.R"), reporter = "silent") expect_length(out, 5) }) test_that("complains if file doesn't exist", { expect_snapshot(error = TRUE, test_file("DOESNTEXIST")) }) # setup-teardown ---------------------------------------------------------- test_that("files created by setup still exist", { # These files should be created/delete by package-wide setup/teardown # We check that they exist here to make sure that they're not cleaned up # too early expect_true(file.exists("DELETE-ME")) expect_true(file.exists("DELETE-ME-2")) }) # helpers ----------------------------------------------------------------- test_that("can filter test scripts", { x <- c("test-a.R", "test-b.R", "test-c.R") expect_equal(filter_test_scripts(x), x) expect_equal(filter_test_scripts(x, "a"), x[1]) expect_equal(filter_test_scripts(x, "a", invert = TRUE), x[-1]) # Strips prefix/suffix expect_equal(filter_test_scripts(x, "test"), character()) expect_equal(filter_test_scripts(x, ".R"), character()) }) # ---------------------------------------------------------------------- test_that("can configure `load_all()` (#1636)", { path <- test_path("testConfigLoadAll") args <- find_load_all_args(path) expect_equal(args, list(export_all = FALSE, helpers = FALSE)) results <- test_local(path, reporter = "silent") for (res in results) { expect_equal(sum(res[["failed"]]), 0) } }) test_that("helpers are included in the testing 
environment", { expect_true("abcdefghi" %in% names(the$testing_env)) }) testthat/tests/testthat/test-expect-output.R0000644000176200001440000000276515104441727021061 0ustar liggesusersf <- function() NULL g <- function() cat("!") test_that("expect = NA checks for no output", { expect_success(expect_output(f(), NA)) expect_snapshot_failure(expect_output(g(), NA)) }) test_that("expect = NULL checks for some output", { expect_snapshot_failure(expect_output(f(), NULL)) expect_success(expect_output(g(), NULL)) }) test_that("expect = string checks for match", { expect_success(expect_output(g(), "!")) expect_snapshot_failure(expect_output(g(), "x")) expect_snapshot_failure(expect_output("a", "x")) }) test_that("multiline outputs captures and matches", { expect_success(expect_output(cat("1\n2"), "1\n2")) }) test_that("expect_output sets width", { x <- expect_output(getOption("width"), NA) expect_equal(x, 80) x <- expect_output(getOption("width"), NA, width = 20) expect_equal(x, 20) }) test_that("... 
passed on to grepl", { expect_success(expect_output(print("X"), "x", ignore.case = TRUE)) }) test_that("always returns first argument", { f1 <- function() { 1 } f2 <- function() { cat("x") 1 } expect_equal(expect_output(f1(), NA), 1) expect_equal(expect_output(f2()), 1) expect_equal(expect_output(f2(), "x"), 1) }) test_that("uses unicode characters in output where available", { skip_if_not(l10n_info()$`UTF-8`) bar <- "\u2551" expect_success(expect_output(cat(bar), "\u2551")) }) test_that("expect_output validates its inputs", { expect_snapshot(error = TRUE, { expect_output(cat("hello"), "hello", width = "wide") }) }) testthat/tests/testthat/testConfigLoadAll/0000755000176200001440000000000015040747541020461 5ustar liggesuserstestthat/tests/testthat/testConfigLoadAll/tests/0000755000176200001440000000000015040747541021623 5ustar liggesuserstestthat/tests/testthat/testConfigLoadAll/tests/testthat/0000755000176200001440000000000015130664352023461 5ustar liggesuserstestthat/tests/testthat/testConfigLoadAll/tests/testthat/helper-config.R0000644000176200001440000000003515040747541026326 0ustar liggesusersmy_helper <- function() NULL testthat/tests/testthat/testConfigLoadAll/tests/testthat/test-config.R0000644000176200001440000000046715040747541026037 0ustar liggesuserstest_that("helpers are not on the path", { expect_false( "my_helper" %in% names(rlang::pkg_env("testConfigLoadAll")) ) }) test_that("internal functions are not on the path", { fns <- names(rlang::pkg_env("testConfigLoadAll")) expect_false("internal" %in% fns) expect_true("exported" %in% fns) }) testthat/tests/testthat/testConfigLoadAll/tests/testthat.R0000644000176200001440000000061215040747541023605 0ustar liggesusers# This file is part of the standard setup for testthat. # It is recommended that you do not modify it. # # Where should you do additional test configuration? 
# Learn more about the roles of various files in: # * https://r-pkgs.org/tests.html # * https://testthat.r-lib.org/reference/test_package.html#special-files library(testthat) library(testConfigLoadAll) test_check("testConfigLoadAll") testthat/tests/testthat/testConfigLoadAll/R/0000755000176200001440000000000015040747541020662 5ustar liggesuserstestthat/tests/testthat/testConfigLoadAll/R/config.R0000644000176200001440000000010415040747541022245 0ustar liggesusersinternal <- function() NULL #' @export exported <- function() NULL testthat/tests/testthat/testConfigLoadAll/NAMESPACE0000644000176200001440000000007715040747541021704 0ustar liggesusers# Generated by roxygen2: do not edit by hand export(exported) testthat/tests/testthat/testConfigLoadAll/DESCRIPTION0000644000176200001440000000111715040747541022167 0ustar liggesusersPackage: testConfigLoadAll Title: What the Package Does (One Line, Title Case) Version: 0.0.0.9000 Authors@R: person("First", "Last", , "first.last@example.com", role = c("aut", "cre"), comment = c(ORCID = "YOUR-ORCID-ID")) Description: What the package does (one paragraph). 
License: `use_mit_license()`, `use_gpl3_license()` or friends to pick a license Encoding: UTF-8 Roxygen: list(markdown = TRUE) RoxygenNote: 7.2.0 Config/testthat/load-all: list(export_all = FALSE, helpers = FALSE) Imports: rlang Suggests: testthat (>= 3.0.0) Config/testthat/edition: 3 testthat/tests/testthat/test-snapshot-reporter.R0000644000176200001440000001511315054412736021723 0ustar liggesuserstest_that("can establish local snapshotter for testing", { snapper <- local_test_snapshotter() snapper$start_file("snapshot-1", "test") expect_true(snapper$is_active()) expect_equal(snapper$file, "snapshot-1") expect_equal(snapper$test, "test") }) test_that("basic workflow", { local_on_cran(FALSE) path <- withr::local_tempdir() snapper <- local_test_snapshotter(snap_dir = path) snapper$start_file("snapshot-2") # output if not active (because test not set here) expect_snapshot_output("x") |> expect_message("Can't save") |> expect_output("[1] \"x\"", fixed = TRUE) # warns on first creation snapper$start_file("snapshot-2", "test") expect_warning(expect_snapshot_output("x"), "Adding new") snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-2.md"))) expect_false(file.exists(file.path(path, "snapshot-2.new.md"))) # succeeds if unchanged snapper$start_file("snapshot-2", "test") expect_success(expect_snapshot_output("x")) snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-2.md"))) expect_false(file.exists(file.path(path, "snapshot-2.new.md"))) # fails if changed snapper$start_file("snapshot-2", "test") expect_failure(expect_snapshot_output("y")) snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-2.md"))) expect_true(file.exists(file.path(path, "snapshot-2.new.md"))) }) test_that("defaults to failing on CI", { local_on_cran(FALSE) withr::local_envvar(CI = "true") path <- withr::local_tempdir() snapper <- local_snapshotter(snap_dir = path) snapper$start_file("snapshot-2") # warns on first creation 
snapper$start_file("snapshot-2", "test") expect_error(expect_snapshot_output("x"), "Adding new") }) test_that("only create new files for changed variants", { local_on_cran(FALSE) snapper <- local_test_snapshotter() snapper$start_file("variants", "test") expect_warning(expect_snapshot_output("x"), "Adding new") expect_warning(expect_snapshot_output("x", variant = "a"), "Adding new") expect_warning(expect_snapshot_output("x", variant = "b"), "Adding new") snapper$end_file() expect_setequal( snapper$snap_files(), c("variants.md", "a/variants.md", "b/variants.md") ) # failure in default snapper$start_file("variants", "test") expect_failure(expect_snapshot_output("y")) expect_success(expect_snapshot_output("x", variant = "a")) expect_success(expect_snapshot_output("x", variant = "b")) snapper$end_file() expect_setequal( snapper$snap_files(), c("variants.md", "variants.new.md", "a/variants.md", "b/variants.md") ) unlink(file.path(snapper$snap_dir, "variants.new.md")) # failure in variant snapper$start_file("variants", "test") expect_success(expect_snapshot_output("x")) expect_success(expect_snapshot_output("x", variant = "a")) expect_failure(expect_snapshot_output("y", variant = "b")) snapper$end_file() expect_setequal( snapper$snap_files(), c("variants.md", "a/variants.md", "b/variants.md", "b/variants.new.md") ) }) test_that("only reverting change in variant deletes .new", { local_on_cran(FALSE) snapper <- local_test_snapshotter() snapper$start_file("v", "test") expect_warning(expect_snapshot_output("x", variant = "a"), "Adding new") expect_warning(expect_snapshot_output("x", variant = "b"), "Adding new") snapper$end_file() expect_setequal(snapper$snap_files(), c("a/v.md", "b/v.md")) # failure snapper$start_file("v", "test") expect_failure(expect_snapshot_output("y", variant = "a")) snapper$end_file() expect_setequal(snapper$snap_files(), c("a/v.md", "b/v.md", "a/v.new.md")) # success snapper$start_file("v", "test") expect_success(expect_snapshot_output("x", variant = 
"a")) snapper$end_file() expect_setequal(snapper$snap_files(), c("a/v.md", "b/v.md")) }) test_that("removing tests removes snap file", { local_on_cran(FALSE) path <- withr::local_tempdir() snapper <- local_test_snapshotter(snap_dir = path) snapper$start_file("snapshot-3", "test") expect_warning(expect_snapshot_output("x"), "Adding new") snapper$end_file() expect_true(file.exists(file.path(path, "snapshot-3.md"))) snapper$start_file("snapshot-3", "test") snapper$end_file() expect_false(file.exists(file.path(path, "snapshot-3.md"))) }) test_that("errors in test doesn't change snapshot", { local_on_cran(FALSE) snapper <- local_test_snapshotter() # First run snapper$start_file("snapshot-5", "test") expect_warning(expect_snapshot_output("x"), "Adding new") snapper$end_file() # Second run has error snapper$start_file("snapshot-5", "test") snapper$add_result(NULL, NULL, as.expectation(simpleError("error"))) snapper$end_file() # Third run snapper$start_file("snapshot-5", "test") expect_warning(expect_snapshot_output("x"), NA) snapper$end_file() # No warning if snapshot already happened snapper$start_file("snapshot-5", "test") expect_snapshot_output("x") expect_warning( snapper$add_result(NULL, NULL, as.expectation(simpleError("error"))), NA ) snapper$end_file() }) test_that("skips and unexpected errors reset snapshots", { regenerate <- FALSE if (regenerate) { withr::local_envvar(c(TESTTHAT_REGENERATE_SNAPS = "true")) } catch_cnd( test_file( test_path("test-snapshot", "test-snapshot.R"), reporter = NULL ) ) path <- "test-snapshot/_snaps/snapshot.md" snaps <- snap_from_md(brio::read_lines(path)) titles <- c("errors reset snapshots", "skips reset snapshots") expect_true(all(titles %in% names(snaps))) }) test_that("`expect_error()` can fail inside `expect_snapshot()`", { out <- test_file( test_path("test-snapshot", "test-expect-condition.R"), reporter = NULL ) err <- out[[1]]$results[[1]] expect_snapshot(err$message) }) test_that("can filter with desc", { path <- 
withr::local_tempdir() # First record some results suppressWarnings({ snapper <- local_test_snapshotter(snap_dir = path) snapper$start_file("snapshot") snapper$start_test(test = "x") expect_snapshot_output(cat("x"), cran = TRUE) snapper$end_test() snapper$start_test(test = "y") expect_snapshot_output(cat("y"), cran = TRUE) snapper$end_test() snapper$end_file() }) snaps_all <- readLines(file.path(path, "snapshot.md")) # Now pretend we just ran one snapper <- local_test_snapshotter(snap_dir = path, desc = "x") snapper$start_file("snapshot") snapper$start_test(test = "x") expect_snapshot_output(cat("x"), cran = TRUE) snapper$end_test() snapper$end_file() snaps_filtered <- readLines(file.path(path, "snapshot.md")) expect_equal(snaps_all, snaps_filtered) }) testthat/tests/testthat/test-capture-output.R0000644000176200001440000000104715047427375021235 0ustar liggesuserstest_that("multiplication works", { utf8 <- "M\u00e4chler" latin1 <- "M\xe4chler" Encoding(latin1) <- "latin1" expect_equal(capture_output_lines(cat(latin1)), utf8, ignore_encoding = FALSE) }) test_that("capture output captures output", { out1 <- capture_output(print(1:5)) out2 <- capture_output(1:5, print = TRUE) expect_equal(out1, "[1] 1 2 3 4 5") expect_equal(out2, "[1] 1 2 3 4 5") }) test_that("capture output doesn't print invisible things", { out <- capture_output(invisible(1), print = TRUE) expect_equal(out, "") }) testthat/tests/testthat/test-reporter-list.R0000644000176200001440000000730615072252215021036 0ustar liggesusers# regression test: test_file() used to crash with a NULL reporter test_that("ListReporter with test_file and NULL reporter", { test_file_path <- 'test-list-reporter/test-exercise-list-reporter.R' expect_no_error(test_file(test_path(test_file_path), reporter = NULL)) }) # regression: check that an exception is reported if it is raised in the test file outside # of a test (test_that() call). 
# N.B: the exception here happens between two tests: "before" and "after" test_that("ListReporter - exception outside of test_that()", { test_file_path <- 'test-list-reporter/test-exception-outside-tests.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_true(is.list(res)) # 2 results: first test "before" + the exception. N.B: the 2nd test "after" is not reported expect_length(res, 2) df <- as.data.frame(res) # the first result should be the results of test "before", that was successful expect_identical(df$test[1], 'before') expect_equal(df$passed[1], 1) expect_false(df$error[1]) # the 2nd result should be the exception expect_true(is.na(df$test[2])) # no test name expect_true(df$error[2]) # it was an error expect_match(res[[2]]$results[[1]]$message, "dying outside of tests") }) test_that("captures error if only thing in file", { test_file_path <- 'test-list-reporter/test-only-error.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_length(res, 1) expect_s3_class(res[[1]]$results[[1]], "expectation_error") }) # ListReporter on a "standard" test file: 2 contexts, passing, failing and crashing tests test_that("exercise ListReporter", { test_file_path <- 'test-list-reporter/test-exercise-list-reporter.R' res <- test_file(test_path(test_file_path), reporter = NULL) expect_s3_class(res, "testthat_results") # we convert the results to data frame for convenience df <- as.data.frame(res) expect_equal(nrow(df), 5) expect_equal( df$test, c("test1", "test2", "test-pass", "test-fail", "test-error") ) # test "A failing test" is the only failing test expect_equal(df$failed, c(0, 0, 0, 1, 0)) expect_identical(expectation_type(res[[4]]$results[[1]]), "failure") # test "A crashing test" is the only crashing test expect_equal(df$error, c(FALSE, FALSE, FALSE, FALSE, TRUE)) expect_identical(expectation_type(res[[5]]$results[[1]]), "error") }) # bare expectations are ignored test_that("ListReporter and bare expectations", { test_file_path <- 
'test-list-reporter/test-bare-expectations.R' res <- test_file(test_path(test_file_path), reporter = NULL) df <- as.data.frame(res) # 2 tests, "before" and "after". no result for the bare expectation expect_identical(df$test, c("before", "after")) }) test_that("works in parallel", { lr <- ListReporter$new() lr$start_file("f1") lr$start_test(NULL, "t11") lr$add_result(NULL, "t11", new_expectation("success", "msg111")) lr$start_file("f2") lr$start_test(NULL, "t21") lr$add_result(NULL, "t21", new_expectation("success", "msg211")) lr$start_file("f1") lr$start_test(NULL, "t11") lr$add_result(NULL, "t11", new_expectation("success", "msg112")) lr$end_test(NULL, "t11") lr$start_file("f2") lr$start_test(NULL, "t21") lr$add_result(NULL, "t21", new_expectation("success", "msg212")) lr$end_test(NULL, "t21") lr$start_file("f2") lr$start_test(NULL, "t22") lr$add_result(NULL, "t22", new_expectation("skip", "skip221")) lr$end_test(NULL, "t22") lr$start_file("f2") lr$end_file() lr$start_file("f1") lr$end_file() results <- as.data.frame(lr$get_results()) expect_snapshot({ results[, c(1:8, 12:13)] }) expect_true(all(!is.na(results$user))) expect_true(all(!is.na(results$system))) expect_true(all(!is.na(results$real))) }) testthat/tests/testthat/test-describe.R0000644000176200001440000000421015054567271020005 0ustar liggesusersdescribe("describe", { it("can contain nested describe blocks", { describe("addition", { it("should be able to add two numbers", { expect_equal(2, 1 + 1) }) describe("sub feature", { it("should also work", { expect_equal(2, 1 + 1) }) }) }) }) }) test_that("can write snaphot tests", { local_description_set() describe("snapshot tests in describe", { expect_snapshot(1 + 1) it("and in it", { expect_snapshot(2 + 2) }) }) }) test_that("unimplemented specs generate skips", { expectations <- capture_expectations({ it("can have not yet implemented specs", { describe("Millennium Prize Problems", { it("can be shown that P != NP") }) }) }) expect_length(expectations, 1) 
expect_s3_class(expectations[[1]], "expectation_skip") }) someExternalVariable <- 1 describe("variable scoping", { someInternalVariable <- 1 it("should be possible to use variables from outer environments", { expect_equal(1, someExternalVariable) expect_equal(1, someInternalVariable) someInternalVariable <- 2 someExternalVariable <- 3 }) # prefix is needed to test this use case testthat::it("even when using it() directly", { expect_equal(1, someExternalVariable) expect_equal(1, someInternalVariable) }) it("shouldn't affect other tests", { expect_equal(1, someExternalVariable) expect_equal(1, someInternalVariable) }) }) test_that("has to have a valid description for the block", { expect_snapshot(error = TRUE, { describe() describe(c("a", "b")) it() it(c("a", "b")) }) }) test_that("names are concatenated", { expectations <- capture_expectations({ describe("a", { describe("b", { it("c", { it("d", { expect_true(TRUE) }) }) }) }) }) expect_equal(expectations[[1]]$test, "a / b / c / d") }) test_that("skips are scoped to describe/it", { expectations <- capture_expectations({ describe("", skip()) describe("", expect_true()) it("", skip()) it("", expect_true(TRUE)) }) expect_length(expectations, 4) }) testthat/tests/testthat/test-otel.R0000644000176200001440000000641315127460763017176 0ustar liggesuserstest_that("otel instrumentation works", { skip_if_not_installed("otelsdk") record <- with_otel_record({ test_that("testing is traced", { expect_equal(1, 1) expect_error(stop("expected error")) }) test_that("all expectations are recorded", { expect_equal(1, 1) expect_true(TRUE) expect_length(1:3, 3) expect_warning(warning("expected warning")) expect_error(stop("expected error")) }) }) traces <- record$traces expect_length(traces, 2L) span <- traces[[1L]] expect_equal( span$name, "test that otel instrumentation works / testing is traced" ) expect_equal(span$instrumentation_scope$name, "org.r-lib.testthat") span <- traces[[2L]] expect_equal(span$attributes[["test.status"]], 
"pass") expect_equal(span$attributes[["test.expectations.total"]], 5) expect_equal(span$attributes[["test.expectations.passed"]], 5) expect_equal(span$attributes[["test.expectations.failed"]], 0) expect_equal(span$attributes[["test.expectations.error"]], 0) expect_equal(span$attributes[["test.expectations.skipped"]], 0) expect_equal(span$attributes[["test.expectations.warning"]], 0) expect_equal(span$status, "ok") }) test_that("otel instrumentation works with describe/it", { skip_if_not_installed("otelsdk") record <- with_otel_record({ with_reporter("silent", { describe("a feature", { it("passes", { expect_true(TRUE) }) it("fails", { expect_equal(1, 1) expect_true(FALSE) }) }) }) }) traces <- record$traces expect_length(traces, 3L) expect_equal(traces[[1L]]$name, "test that a feature / passes") expect_equal(traces[[1L]]$attributes[["test.expectations.total"]], 1) expect_equal(traces[[1L]]$attributes[["test.status"]], "pass") expect_equal(traces[[1L]]$status, "ok") expect_equal(traces[[2L]]$name, "test that a feature / fails") expect_equal(traces[[2L]]$attributes[["test.expectations.total"]], 2) expect_equal(traces[[2L]]$attributes[["test.expectations.passed"]], 1) expect_equal(traces[[2L]]$attributes[["test.expectations.failed"]], 1) expect_equal(traces[[2L]]$attributes[["test.status"]], "fail") expect_equal(traces[[2L]]$status, "error") expect_equal(traces[[3L]]$name, "test that a feature") expect_equal(traces[[3L]]$attributes[["test.expectations.total"]], 0) }) test_that("otel instrumentation works with nested test_that", { skip_if_not_installed("otelsdk") record <- with_otel_record({ with_reporter("silent", { test_that("outer test", { expect_true(TRUE) test_that("inner test fails", { expect_equal(1, 2) }) }) }) }) traces <- record$traces expect_length(traces, 2L) expect_equal(traces[[1L]]$name, "test that outer test / inner test fails") expect_equal(traces[[1L]]$attributes[["test.expectations.total"]], 1) 
expect_equal(traces[[1L]]$attributes[["test.expectations.failed"]], 1) expect_equal(traces[[1L]]$attributes[["test.status"]], "fail") expect_equal(traces[[1L]]$status, "error") expect_equal(traces[[2L]]$name, "test that outer test") expect_equal(traces[[2L]]$attributes[["test.expectations.total"]], 1) expect_equal(traces[[2L]]$attributes[["test.status"]], "pass") expect_equal(traces[[2L]]$status, "ok") }) testthat/tests/testthat/test-reporter-teamcity.R0000644000176200001440000000013214164710003021663 0ustar liggesuserstest_that("reporter basics work", { expect_snapshot_reporter(TeamcityReporter$new()) }) testthat/tests/testthat/test-reporter-slow.R0000644000176200001440000000052615047715224021052 0ustar liggesuserstest_that("multiplication works", { n_tests <- function(n) { for (i in 1:n) { test_that(paste0("run ", i), { suceed() }) } } expect_snapshot( { show_all <- SlowReporter$new(min_time = 0) with_reporter(show_all, n_tests(10)) }, transform = \(x) gsub("\\d.\\d\\ds", "-.--s", x), ) }) testthat/tests/testthat/test-reporter.R0000644000176200001440000000236015127561732020070 0ustar liggesuserstest_that("can control output with file arg/option", { # powered through Reporter base class so we only test one reporter path <- withr::local_tempfile() with_reporter( MinimalReporter$new(file = path), test_one_file(test_path("reporters/tests.R")) ) expect_snapshot_output(readLines(path)) withr::local_options(testthat.output_file = path) with_reporter( MinimalReporter$new(), test_one_file(test_path("reporters/tests.R")) ) expect_snapshot_output(readLines(path)) }) test_that("should not automatically skip in non-utf-8 locales", { withr::local_locale(LC_CTYPE = "C") expect_true(TRUE) }) test_that("default_reporter() selects appropriate reporter", { withr::local_envvar(CLAUDECODE = NA) expect_equal(default_reporter(), "Progress") expect_equal(default_reporter(parallel = TRUE), "ParallelProgress") withr::local_envvar(CLAUDECODE = "1") expect_equal(default_reporter(), "Llm") 
expect_equal(default_reporter(parallel = TRUE), "Llm") }) test_that("default_compact_reporter() selects appropriate reporter", { withr::local_envvar(CLAUDECODE = NA) expect_equal(default_compact_reporter(), "CompactProgress") withr::local_envvar(CLAUDECODE = "1") expect_equal(default_compact_reporter(), "Llm") }) testthat/tests/testthat/test-expect-constant.R0000644000176200001440000000141515057655662021355 0ustar liggesuserstest_that("logical tests act as expected", { df <- data.frame(1:10) expect_success(expect_true(TRUE)) expect_snapshot_failure(expect_true(df)) expect_success(expect_false(FALSE)) expect_snapshot_failure(expect_false(df)) }) test_that("logical tests ignore attributes", { expect_success(expect_true(c(a = TRUE))) expect_success(expect_false(c(a = FALSE))) }) test_that("additional info returned in message", { expect_failure(expect_true(FALSE, "NOPE"), "NOPE") expect_failure(expect_false(TRUE, "YUP"), "YUP") }) test_that("expect_null works", { x <- NULL df <- data.frame(1:10) expect_success(expect_null(x)) expect_snapshot_failure(expect_null(df)) }) test_that("returns the input value", { res <- expect_true(TRUE) expect_equal(res, TRUE) }) testthat/tests/testthat/test-warning/0000755000176200001440000000000015127731052017541 5ustar liggesuserstestthat/tests/testthat/test-warning/test-warning.R0000644000176200001440000000012114313316267022303 0ustar liggesuserstest_that("warning emitted", { warning("This is not a test", call. 
= FALSE) }) testthat/tests/testthat/test-snapshot.R0000644000176200001440000001506115127561732020067 0ustar liggesuserstest_that("variants save different values", { expect_snapshot(r_version(), variant = r_version()) }) test_that("can snapshot output", { foo <- function() cat("y") expect_snapshot_output(foo()) expect_snapshot_output(foo()) expect_snapshot_output(foo()) expect_snapshot_output(foo()) }) test_that("can snapshot everything", { f <- function() { print("1") message("2") warning("3") stop("4") } expect_snapshot(f(), error = TRUE) }) test_that("empty lines are preserved", { f <- function() { cat("1\n\n") message("2\n") warning("3\n") stop("4\n\n") } expect_snapshot(f(), error = TRUE) }) test_that("line-endings fixed before comparison", { x <- "a\n\rb" expect_snapshot(cat(x)) }) test_that("multiple outputs of same type are collapsed", { expect_snapshot({ x <- 1 y <- 1 { message("a") message("b") } { warning("a") warning("b") } }) }) test_that("can scrub output/messages/warnings/errors", { secret <- function() { print("secret") message("secret") warning("secret") stop("secret") } redact <- function(x) gsub("secret", "", x) expect_snapshot(secret(), transform = redact, error = TRUE) # Or with an inline fun expect_snapshot(print("secret"), transform = \(x) gsub("secret", "****", x)) }) test_that("always checks error status", { expect_error(expect_snapshot(stop("!"), error = FALSE)) expect_snapshot_failure(expect_snapshot(print("!"), error = TRUE)) }) test_that("snapshots of failures fail", { expect_snapshot_failure(expect_snapshot(fail())) }) test_that("can capture error/warning messages", { expect_snapshot_error(stop("This is an error")) expect_snapshot_warning(warning("This is a warning")) }) test_that("snapshot captures deprecations", { foo <- function() { lifecycle::deprecate_warn("1.0.0", "foo()") } expect_snapshot(foo()) expect_snapshot_warning(foo()) expect_snapshot_warning(foo(), class = "lifecycle_warning_deprecated") }) test_that("can check 
error/warning classes", { expect_snapshot(expect_snapshot_error(1), error = TRUE) expect_snapshot(expect_snapshot_error(1, class = "myerror"), error = TRUE) expect_snapshot(expect_snapshot_warning(1), error = TRUE) expect_snapshot(expect_snapshot_warning(1, class = "mywarning"), error = TRUE) }) test_that("snapshot handles multi-line input", { expect_snapshot({ 1 + 2 3 + 4 "this is a comment" }) }) test_that("snapshot captures output if visible", { f_visible <- function() "x" f_invisible <- function() invisible("x") expect_snapshot(f_visible()) expect_snapshot(f_invisible()) }) test_that("captures custom classes", { f <- function() { inform("Hello", class = "testthat_greeting") warn("Goodbye", class = "testthat_farewell") abort("Eeek!", class = "testthat_scream") } expect_snapshot(f(), error = TRUE) }) test_that("even with multiple lines", { expect_snapshot_output(cat("a\nb\nc")) expect_snapshot_output(cat("a\nb\nc\n")) }) test_that("`expect_snapshot()` does not inject", { expect_snapshot({ x <- quote(!!foo) expect_equal(x, call("!", call("!", quote(foo)))) }) }) test_that("full condition message is printed with rlang", { expect_snapshot(error = TRUE, { foo <- error_cnd("foo", message = "Title parent.") abort("Title.", parent = foo) }) }) test_that("can print with and without condition classes", { f <- function() { message("foo") warning("bar") stop("baz") } expect_snapshot(error = TRUE, cnd_class = TRUE, f()) expect_snapshot(error = TRUE, cnd_class = FALSE, f()) }) test_that("errors and warnings are folded", { f <- function() { warning("foo") stop("bar") } expect_snapshot(error = TRUE, f()) }) # I don't know how to test this automatically; wrapping it in another # snapshot doesn't capture the behaviour I expected, presumably due to the # way that errors bubble up # test_that("errors in snapshots behave like regular errors", { # f <- function() g() # g <- function() h() # h <- function() abort("!") # # expect_snapshot(f()) # expect_snapshot(1 + 1) # }) 
test_that("extracts original error class", { catch_entraced <- function(code) { tryCatch( withCallingHandlers(code, error = function(cnd) rlang::entrace(cnd)), error = function(cnd) cnd ) } cnd <- catch_entraced(stop("!", call. = FALSE)) expect_equal(error_class(cnd), "simpleError") cnd <- catch_entraced(stop(errorCondition("!", class = "badError"))) expect_equal(error_class(cnd), "badError") cnd <- catch_entraced(abort("!")) expect_equal(error_class(cnd), "rlang_error") }) test_that("hint is informative", { local_mocked_bindings(in_check_reporter = function() FALSE) withr::local_envvar(GITHUB_ACTIONS = "false", TESTTHAT_WD = NA) expect_snapshot(snapshot_hint("bar.R", reset_output = FALSE)) }) test_that("hint includes path when WD is different", { local_mocked_bindings(in_check_reporter = function() FALSE) withr::local_envvar(TESTTHAT_WD = "..") hint <- snapshot_hint("bar.R", reset_output = FALSE) # Can't use snapshot here because its hint will get the wrong path expect_match( hint, 'snapshot_accept("bar.R", "testthat")', fixed = TRUE, all = FALSE ) }) test_that("expect_snapshot requires a non-empty test label", { local_description_set() local_on_cran(FALSE) test_that("", { expect_error(expect_snapshot(1 + 1)) }) pass() # quiet message about this test being empty }) test_that("expect_snapshot validates its inputs", { expect_snapshot(error = TRUE, { expect_snapshot(1 + 1, cran = "yes") expect_snapshot(1 + 1, error = "yes") expect_snapshot(1 + 1, cnd_class = "yes") }) }) test_that("expect_snapshot_output validates its inputs", { expect_snapshot(error = TRUE, { expect_snapshot_output(cat("test"), cran = "yes") }) }) test_that("expect_snapshot_error validates its inputs", { expect_snapshot(error = TRUE, { expect_snapshot_error(stop("!"), class = 123) expect_snapshot_error(stop("!"), cran = "yes") }) }) test_that("expect_snapshot_warning validates its inputs", { expect_snapshot(error = TRUE, { expect_snapshot_warning(warning("!"), class = 123) 
expect_snapshot_warning(warning("!"), cran = "yes") }) }) test_that("on CRAN, snapshots generate skip at end of test", { local_on_cran(TRUE) expectations <- capture_expectations(test_that("", { expect_snapshot(1 + 1) expect_true(TRUE) })) expect_length(expectations, 2) expect_s3_class(expectations[[1]], "expectation_success") expect_s3_class(expectations[[2]], "expectation_skip") }) testthat/tests/testthat/test-expect-all.R0000644000176200001440000000162015072252215020252 0ustar liggesuserstest_that("validates its inputs", { expect_snapshot(error = TRUE, { expect_all_equal(mean, 1) expect_all_equal(logical(), 1) expect_all_equal(1:10, mean) expect_all_equal(1:10, 1:2) }) }) test_that("can compare atomic vectors", { x <- rep(TRUE, 10) expect_success(expect_all_equal(x, TRUE)) x[5] <- FALSE expect_snapshot_failure(expect_all_equal(x, TRUE)) }) test_that("can compare named lists", { x <- list(a = 1, b = 1, c = 2, d = 1, e = 1) expect_snapshot_failure(expect_all_equal(x, list(1))) }) test_that("has tolerance enabled", { expect_success(expect_all_equal(1, 1L)) }) test_that("truncates very long differences", { x <- rep(TRUE, 10) expect_snapshot_failure(expect_all_equal(x, FALSE)) }) test_that("has TRUE and FALSE helpers", { x1 <- rep(TRUE, 10) x2 <- rep(FALSE, 10) expect_success(expect_all_true(x1)) expect_success(expect_all_false(x2)) }) testthat/tests/testthat/test-context.R0000644000176200001440000000264615053661631017716 0ustar liggesusersCountReporter <- R6::R6Class( "CountReporter", inherit = Reporter, public = list( context_i = 0, context_count = 0, test_i = 0, test_count = 0, start_context = function(context) { self$context_count <- self$context_count + 1 self$context_i <- self$context_i + 1 }, end_context = function(context) { self$context_i <- self$context_i - 1 stopifnot(self$context_i >= 0) }, start_test = function(context, test) { self$test_count <- self$test_count + 1 self$test_i <- self$test_i + 1 }, end_test = function(context, test) { self$test_i <- 
self$test_i - 1 stopifnot(self$test_i >= 0) } ) ) test_that("contexts are opened, then closed", { local_edition(2) report <- CountReporter$new() local_description_set() with_reporter(report, test_one_file("context.R")) expect_equal(report$context_count, 2) expect_equal(report$context_i, 0) expect_equal(report$test_count, 4) expect_equal(report$test_i, 0) }) test_that("context_name strips prefix and extensions correctly", { expect_equal(context_name("test-metrics.R"), "metrics") # uppercase expect_equal(context_name("test-metrics.r"), "metrics") # lowercase expect_equal(context_name("test-check.Rfile.R"), "check.Rfile") # suffix only expect_equal(context_name("test-test-test.R"), "test-test") # 1st prefix only expect_equal(context_name("test_metrics.R"), "metrics") }) testthat/tests/testthat/test-expect-known.txt0000644000176200001440000000001615127731047021260 0ustar liggesusersü ⩝ 我 и testthat/tests/testthat/test-test-compiled-code.R0000644000176200001440000000126315047715224021706 0ustar liggesuserstest_that("get_routine() finds own 'run_testthat_tests'", { routine <- get_routine("testthat", "run_testthat_tests") expect_s3_class(routine, "NativeSymbolInfo") }) test_that("get_routine() fails when no routine exists", { expect_snapshot(error = TRUE, get_routine("utils", "no_such_routine")) }) test_that("validates inputs", { expect_snapshot(error = TRUE, { expect_cpp_tests_pass(123) run_cpp_tests(123) }) }) test_that("useful messaging", { path <- withr::local_tempdir() writeLines("Package: foo", file.path(path, "DESCRIPTION")) dir.create(file.path(path, "R")) expect_snapshot(use_catch(path)) }) skip_if_not_installed("xml2") run_cpp_tests("testthat") testthat/tests/testthat/test-expect-shape.R0000644000176200001440000000773615054145645020630 0ustar liggesuserstest_that("length computed correctly", { expect_success(expect_length(1, 1)) expect_success(expect_length(1:10, 10)) expect_success(expect_length(letters[1:5], 5)) }) test_that("generates actionable failure 
message", { x <- 1:10 expect_snapshot_failure(expect_length(x, 2)) }) test_that("uses S4 length method", { A <- setClass("ExpectLengthA", slots = c(x = "numeric", y = "numeric")) setMethod("length", "ExpectLengthA", function(x) 5L) expect_success(expect_length(A(x = 1:9, y = 3), 5)) }) test_that("returns input", { x <- list(1:10, letters) out <- expect_length(x, 2) expect_identical(out, x) }) test_that("expect_length validates its inputs", { expect_snapshot(error = TRUE, { expect_length(1:5, "a") }) }) test_that("dim compared correctly", { expect_success(expect_shape(matrix(nrow = 5, ncol = 4), dim = c(5L, 4L))) expect_snapshot_failure(expect_shape( matrix(nrow = 6, ncol = 3), dim = c(6L, 2L) )) expect_snapshot_failure(expect_shape( matrix(nrow = 6, ncol = 3), dim = c(7L, 3L) )) expect_success(expect_shape(data.frame(1:10, 11:20), dim = c(10, 2))) expect_success(expect_shape(array(dim = 1:3), dim = 1:3)) expect_snapshot_failure(expect_shape(array(dim = 1:3), dim = 1:2)) expect_snapshot_failure(expect_shape(array(dim = 1:3), dim = 1:4)) expect_success(expect_shape(array(integer()), dim = 0L)) dd <- c(0L, 0L, 0L, 5L, 0L, 0L, 0L) expect_success(expect_shape(array(dim = dd), dim = dd)) x <- cbind(1:2, 3:4) out <- expect_shape(x, dim = c(2L, 2L)) expect_identical(out, x) }) test_that("nrow compared correctly", { expect_success(expect_shape(matrix(nrow = 5, ncol = 4), nrow = 5L)) expect_snapshot_failure(expect_shape(matrix(nrow = 5, ncol = 5), nrow = 6L)) expect_success(expect_shape(data.frame(1:10, 11:20), nrow = 10L)) expect_snapshot_failure(expect_shape(1, nrow = 1)) expect_success(expect_shape(array(integer()), nrow = 0L)) dd <- c(0L, 0L, 0L, 5L, 0L, 0L, 0L) expect_success(expect_shape(array(dim = dd), nrow = 0L)) x <- cbind(1:2, 3:4) out <- expect_shape(x, dim = c(2L, 2L)) expect_identical(out, x) }) test_that("ncol compared correctly", { expect_success(expect_shape(matrix(nrow = 5, ncol = 4), ncol = 4L)) expect_snapshot_failure(expect_shape(matrix(nrow = 5, ncol = 
5), ncol = 7L)) expect_success(expect_shape(data.frame(1:10, 11:20), ncol = 2L)) expect_snapshot_failure(expect_shape(array(1), ncol = 1)) expect_snapshot_failure(expect_shape(array(integer()), ncol = 0L)) dd <- c(0L, 0L, 0L, 5L, 0L, 0L, 0L) expect_success(expect_shape(array(dim = dd), ncol = 0L)) x <- cbind(1:2, 3:4) out <- expect_shape(x, dim = c(2L, 2L)) expect_identical(out, x) }) test_that("uses S3 dim method", { local_bindings( dim.testthat_expect_shape = function(x) 1:2, .env = globalenv() ) x <- structure(integer(), class = "testthat_expect_shape") expect_success(expect_shape(x, dim = 1:2)) }) test_that("NA handling (e.g. dbplyr)", { local_bindings( dim.testthat_expect_shape_missing = function(x) c(NA_integer_, 10L), .env = globalenv() ) x <- structure(integer(), class = "testthat_expect_shape_missing") expect_success(expect_shape(x, nrow = NA_integer_)) expect_success(expect_shape(x, ncol = 10L)) expect_success(expect_shape(x, dim = c(NA_integer_, 10L))) expect_snapshot_failure(expect_shape(x, nrow = 10L)) expect_snapshot_failure(expect_shape(x, ncol = NA_integer_)) expect_snapshot_failure(expect_shape(x, dim = c(10L, NA_integer_))) }) test_that("uses S4 dim method", { A <- setClass("ExpectShapeA", slots = c(x = "numeric", y = "numeric")) setMethod("dim", "ExpectShapeA", function(x) 8:10) expect_success(expect_shape(A(x = 1:9, y = 3), dim = 8:10)) }) test_that("checks inputs arguments, ", { expect_snapshot(error = TRUE, { expect_shape(1:10) expect_shape(1:10, nrow = 1L, ncol = 2L) expect_shape(1:10, 2) expect_shape(array(1), nrow = "x") expect_shape(array(1), ncol = "x") expect_shape(array(1), dim = "x") }) }) testthat/tests/testthat/test-expect-silent.R0000644000176200001440000000120015054145645021002 0ustar liggesuserstest_that("checks for any type of output", { expect_failure(expect_silent(warning("!"))) expect_failure(expect_silent(message("!"))) expect_failure(expect_silent(print("!"))) expect_success(expect_silent("")) }) test_that("generates useful 
failure message", { f <- function() { warning("warning") message("message") cat("output") } expect_snapshot_failure(expect_silent(f())) }) test_that("returns first argument", { expect_equal(expect_silent(1), 1) }) test_that("deprecations are ignored", { foo <- function() { lifecycle::deprecate_warn("1.0.0", "foo()") } expect_success(expect_silent(foo)) }) testthat/tests/testthat/test-snapshot/0000755000176200001440000000000015130664352017735 5ustar liggesuserstestthat/tests/testthat/test-snapshot/test-snapshot.R0000644000176200001440000000055115042772153022676 0ustar liggesuserstest_that("errors reset snapshots", { if (nzchar(Sys.getenv("TESTTHAT_REGENERATE_SNAPS"))) { expect_snapshot(print(1)) } else { expect_snapshot(stop("failing")) } }) test_that("skips reset snapshots", { if (nzchar(Sys.getenv("TESTTHAT_REGENERATE_SNAPS"))) { expect_snapshot(print(1)) } else { expect_snapshot(skip("skipping")) } }) testthat/tests/testthat/test-snapshot/test-expect-condition.R0000644000176200001440000000021414164710003024275 0ustar liggesuserstest_that("can use failing condition expectation inside `expect_snapshot()`", { local_edition(3) expect_snapshot(expect_error(NULL)) }) testthat/tests/testthat/test-snapshot/_snaps/0000755000176200001440000000000015044143432021213 5ustar liggesuserstestthat/tests/testthat/test-snapshot/_snaps/snapshot.md0000644000176200001440000000022315127731051023373 0ustar liggesusers# errors reset snapshots Code print(1) Output [1] 1 # skips reset snapshots Code print(1) Output [1] 1 testthat/tests/testthat/test-expectation.R0000644000176200001440000000312715072252215020543 0ustar liggesuserstest_that("expectation contains failure message even when successful", { e <- expect(TRUE, "I failed") expect_equal(e$message, "I failed") }) test_that("info only evaluated on failure", { expect_no_error(expect(TRUE, "fail", info = stop("!"))) }) test_that("validates key inputs", { expect_snapshot(error = TRUE, { expect(1) expect(TRUE, 1) }) }) test_that("can 
subclass expectation", { exp <- new_expectation( "failure", "didn't work", .subclass = "foo", bar = "baz" ) expect_true(inherits_all( exp, c("foo", "expectation_failure", "expectation", "error", "condition") )) expect_identical(attr(exp, "bar"), "baz") }) test_that("`expect()` and `exp_signal()` signal expectations", { expect_success(expect(TRUE, "")) expect_failure(expect(FALSE, "")) expect_success(exp_signal(new_expectation("success", ""))) expect_failure(exp_signal(new_expectation("failure", ""))) }) test_that("conditionMessage() is called during conversion", { local_bindings( conditionMessage.foobar = function(...) "dispatched", .env = global_env() ) wrn <- warning_cnd("foobar", message = "wrong") expect_identical(as.expectation(wrn)$message, "dispatched") err <- error_cnd("foobar", message = "wrong") expect_match(as.expectation(err)$message, "Error: dispatched") err <- cnd(c("foobar", "skip"), message = "wrong") expect_identical(as.expectation(err)$message, "dispatched") }) test_that("error message includes call", { f <- function() stop("Error!") cnd <- catch_cnd(f()) expect_equal(format(as.expectation(cnd)), "Error in `f()`: Error!") }) testthat/tests/testthat/test-expect-condition.R0000644000176200001440000002406515072252215021500 0ustar liggesuserstest_that("returns condition or value", { expect_equal(expect_error(1, NA), 1) expect_s3_class(expect_error(stop("!")), "simpleError") }) test_that("regexp = NULL checks for presence of error", { expect_success(expect_error(stop())) f <- function() {} expect_snapshot_failure(expect_error(f())) }) test_that("regexp = NA checks for absence of error", { expect_success(expect_error({}, NA)) f <- function() stop("Yes") expect_snapshot_failure(expect_error(f(), NA)) }) test_that("regexp = string matches for error message", { expect_success(expect_error(stop("Yes"), "Yes")) expect_error(expect_error(stop("Yes"), "No")) f <- function() {} expect_snapshot_failure(expect_error(f(), "No")) }) test_that("class = string 
matches class of error", { blah <- function() { abort("hi", class = c("blah", "error", "condition")) } expect_success(expect_error(blah(), class = "blah")) # otherwise bubbles up expect_error(expect_error(blah(), class = "blech"), class = "blah") }) test_that("base_class must match when class is set", { foo <- function() warn("foo", class = "bar") expect_warning(expect_failure(expect_error(foo(), class = "bar"))) expect_success(expect_warning(foo(), class = "bar")) }) test_that("expect_error validates its inputs", { expect_snapshot(error = TRUE, { expect_error(stop("!"), regexp = 1) expect_error(stop("!"), class = 1) expect_error(stop("!"), inherit = "yes") }) }) test_that("... passed on to grepl", { expect_success(expect_error(stop("X"), "x", ignore.case = TRUE)) }) test_that("message method is called when expecting error", { local_bindings( conditionMessage.foobar = function(err) "dispatched!", .env = globalenv() ) fb <- function() abort("foobar", "foobar") expect_error(fb(), "dispatched!", class = "foobar") expect_snapshot_failure(expect_error(fb(), NA)) }) test_that("rlang backtrace reminders are not included in error message", { f <- function() g() g <- function() h() h <- function() abort("foo") expect_error(f(), "foo$") }) test_that("can capture Throwable conditions from rJava", { local_bindings( conditionMessage.Throwable = function(c, ...) unclass(c)$message, conditionCall.Throwable = function(c, ...) unclass(c)$call, `$.Throwable` = function(...) stop("forbidden"), `$<-.Throwable` = function(...) 
stop("forbidden"), .env = globalenv() ) throw <- function(msg) stop(error_cnd("Throwable", message = msg)) expect_error(throw("foo"), "foo", class = "Throwable") }) test_that("capture correct trace_env (#1994)", { # This should fail, not error status <- capture_success_failure({ stop("oops") |> expect_error() |> expect_warning() }) expect_equal(status$n_success, 1) # from expect_error() expect_equal(status$n_failure, 1) # from expect_warning() status <- capture_success_failure({ stop("oops") |> expect_error() |> expect_warning() }) expect_equal(status$n_success, 1) # from expect_error() expect_equal(status$n_failure, 1) # from expect_warning() }) # expect_warning() ---------------------------------------------------------- test_that("warnings are converted to errors when options('warn') >= 2", { withr::with_options(c(warn = 2), { expect_warning(warning("foo")) expect_error(warning("foo")) }) }) test_that("can silence warnings", { expect_warning(suppressWarnings(warning("foo")), NA) # Can't test with `expect_warning()` because the warning is still # signalled, it's just not printed # https://github.com/wch/r-source/blob/886ab4a0/src/main/errors.c#L388-L484 withr::with_options(c(warn = -1), warning("foo")) }) test_that("when checking for no warnings, exclude deprecation warnings", { foo <- function() { lifecycle::deprecate_warn("1.0.0", "foo()") } expect_warning( expect_warning(foo(), NA), class = "lifecycle_warning_deprecated" ) }) test_that("when checking for no warnings, exclude deprecation warnings (2e)", { local_edition(2) foo <- function() { options(lifecycle_verbosity = "warning") lifecycle::deprecate_warn("1.0.0", "foo()") } expect_warning( expect_warning(foo(), NA), class = "lifecycle_warning_deprecated" ) }) test_that("expect_warning validates its inputs", { expect_snapshot(error = TRUE, { expect_warning(warning("!"), regexp = 1) expect_warning(warning("!"), class = 1) expect_warning(warning("!"), inherit = "yes") expect_warning(warning("!"), all = "yes") 
}) }) # expect_message ---------------------------------------------------------- test_that("regexp = NA checks for absence of message", { expect_success(expect_message({}, NA)) f <- \() message("!") expect_snapshot_failure(expect_message(f(), NA)) }) test_that("expect_message validates its inputs", { expect_snapshot(error = TRUE, { expect_message(message("!"), regexp = 1) expect_message(message("!"), class = 1) expect_message(message("!"), inherit = "yes") expect_message(message("!"), all = "yes") }) }) # expect_condition -------------------------------------------------------- test_that("continues evaluation", { expect_condition({ message("Hi") new_variable <- 1 }) expect_equal(exists("new_variable"), TRUE) }) test_that("but not after error", { expect_condition({ stop("Hi") new_variable <- 1 }) expect_equal(exists("new_variable"), FALSE) }) test_that("captured condition is muffled", { expect_message(expect_condition(message("Hi")), NA) expect_warning(expect_condition(warning("Hi")), NA) expect_error(expect_condition(stop("Hi")), NA) }) test_that("condition class is included in failure", { f1 <- function() signal(class = "foo") expect_snapshot_failure(expect_condition(f1(), class = "bar")) }) test_that("only matching condition is captured, others bubble up", { f1 <- function() { message("Hi") message("Bye") } expect_condition(expect_condition(f1(), "Hi"), "Bye") expect_condition(expect_condition(f1(), "Bye"), "Hi") f2 <- function() { message("Hi") stop("Bye") } expect_error(expect_condition(f2(), "Hi"), "Bye") }) test_that("cnd expectations consistently return condition (#1371)", { f <- function(out, action) { action out } expect_s3_class(expect_message(f(NULL, message(""))), "simpleMessage") expect_s3_class(expect_warning(f(NULL, warning(""))), "simpleWarning") expect_s3_class(expect_error(f(NULL, stop(""))), "simpleError") # Used to behave differently with non-`NULL` values expect_s3_class( expect_message(f("return value", message(""))), "simpleMessage" ) 
expect_s3_class( expect_warning(f("return value", warning(""))), "simpleWarning" ) expect_s3_class(expect_error(f("return value", stop(""))), "simpleError") # If there is no condition expected we return the value expect_equal( expect_message(f("return value", NULL), regexp = NA), "return value" ) expect_equal( expect_warning(f("return value", NULL), regexp = NA), "return value" ) expect_equal( expect_error(f("return value", NULL), regexp = NA), "return value" ) }) test_that("cli width wrapping doesn't affect text matching", { skip_if_not_installed("cli", "3.0.2") skip_if_not_installed("rlang", "1.0.0") local_use_cli() expect_error( abort( "foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz" ), "foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz foobarbaz" ) }) test_that("can match parent conditions (#1493)", { parent <- error_cnd("foo", message = "Parent message.") f <- function() abort("Tilt.", parent = parent) expect_error(f(), class = "foo") expect_error(f(), "^Parent message.$") # Pattern and class must match the same condition expect_error(expect_error(f(), "Tilt.", class = "foo")) # Can disable parent matching expect_error(expect_error(f(), class = "foo", inherit = FALSE)) expect_error(expect_error(f(), "Parent message.", inherit = FALSE)) }) test_that("expect_condition validates its inputs", { expect_snapshot(error = TRUE, { expect_condition(stop("!"), regexp = 1) expect_condition(stop("!"), class = 1) expect_condition(stop("!"), inherit = "yes") }) }) test_that("unused arguments generate an error", { expect_snapshot(error = TRUE, { expect_condition(stop("Hi!"), foo = "bar") expect_condition(stop("Hi!"), , , "bar") expect_condition(stop("Hi!"), , , "bar", fixed = TRUE) expect_condition(stop("Hi!"), "x", foo = "bar") expect_condition(stop("Hi!"), pattern = "bar", fixed = TRUE) }) }) # second edition 
---------------------------------------------------------- test_that("other conditions are swallowed", { f <- function(...) { conds <- c(...) for (cond in conds) { switch( cond, message = message("message"), warning = warning("warning"), error = stop("error"), condition = signal("signal", class = "signal") ) } } local_edition(2) # if condition text doesn't match, expectation fails (not errors) expect_failure(expect_error(f("error"), "not a match")) expect_failure(expect_warning(f("warning"), "not a match")) expect_failure(expect_message(f("message"), "not a match")) expect_failure(expect_condition(f("condition"), "not a match")) # if error/condition class doesn't match, expectation fails expect_failure(expect_error(f("error"), class = "not a match")) expect_failure(expect_condition(f("message"), class = "not a match")) # expect_message() and expect_warning() swallow all messages/warnings expect_message(expect_message(f("message", "message")), NA) expect_warning(expect_warning(f("warning", "warning")), NA) }) test_that("can match parent conditions (edition 2, #1493)", { local_edition(2) parent <- error_cnd("foo", message = "Parent message.") f <- function() abort("Tilt.", parent = parent) expect_error(f(), class = "foo") expect_error(f(), "^Parent message.$") # Can disable parent matching expect_error(expect_error(f(), class = "foo", inherit = FALSE)) expect_error(expect_error(f(), "Parent message.", inherit = FALSE)) }) testthat/tests/testthat/test-mock2.R0000644000176200001440000000455615040747540017247 0ustar liggesuserstest_that("with_mocked_bindings affects local bindings", { out <- with_mocked_bindings( test_mock_internal(), test_mock_internal2 = function() "x" ) expect_equal(out, "x") expect_equal(test_mock_internal(), "y") }) test_that("local_mocked_bindings affects local bindings", { local({ local_mocked_bindings(test_mock_internal = function() "x") expect_equal(test_mock_internal(), "x") }) expect_equal(test_mock_internal(), "y") }) test_that("unlocks and 
relocks binding if needed", { ns_env <- ns_env("testthat") expect_true(env_binding_are_locked(ns_env, "test_mock_direct")) local({ local_mocked_bindings(test_mock_direct = function(...) "x") expect_false(env_binding_are_locked(ns_env, "test_mock_direct")) }) expect_true(env_binding_are_locked(ns_env, "test_mock_direct")) }) test_that("can make wrapper", { local_mock_x <- function(env = caller_env()) { local_mocked_bindings(test_mock_internal2 = function() "x", .env = env) } local({ local_mock_x() expect_equal(test_mock_internal(), "x") }) expect_equal(test_mock_internal(), "y") }) test_that("with_mocked_bindings() validates its inputs", { expect_snapshot(error = TRUE, { with_mocked_bindings(1 + 1, function() 2) }) }) # ------------------------------------------------------------------------- test_that("can mock directly", { local_mocked_bindings(test_mock_direct = function(...) "x") expect_equal(test_mock_direct(), "x") }) test_that("can mock bindings from imports", { local_mocked_bindings(sym = function(...) "x") expect_equal(test_mock_imports(), "x") }) test_that("can mock bindings in another package", { local_mocked_bindings(sym = function(...) "x", .package = "rlang") expect_equal(test_mock_namespaced(), "x") }) test_that("can mock S3 methods", { local({ local_mocked_bindings(test_mock_method.integer = function(...) 
"x") expect_equal(test_mock_method(1L), "x") }) expect_equal(test_mock_method(1L), "y") }) test_that("can't mock bindings that don't exist", { expect_snapshot(local_mocked_bindings(f = function() "x"), error = TRUE) }) test_that("can mock base functions with in-package bindings", { local_mocked_bindings(interactive = function() TRUE) expect_equal(test_mock_base(), TRUE) }) test_that("can mock values", { local_mocked_bindings(test_mock_value = 100) expect_equal(test_mock_value, 100) }) testthat/tests/testthat/test-srcrefs.R0000644000176200001440000000467315053661134017701 0ustar liggesuserssrcref_line <- function(code) { srcref <- attr(substitute(code), "srcref") if (!is.list(srcref)) { stop("code doesn't have srcref", call. = FALSE) } results <- with_reporter("silent", code)$expectations() unlist(lapply(results, function(x) x$srcref[1])) - srcref[[1]][1] } test_that("line numbers captured for expectations and warnings", { f <- function() warning("Uh oh") lines <- srcref_line({ test_that("simple", { # line 1 expect_true(FALSE) # line 2 f() # line 3 }) }) expect_equal(lines, c(2, 3)) }) test_that("line numbers captured when called indirectly", { lines <- srcref_line({ test_that("simple", { # line 1 f <- function() g() # line 2 g <- function() h() # line 3 h <- function() expect_true(FALSE) # line 4 # line 5 h() # line 6 }) }) expect_equal(lines, 4) lines <- srcref_line({ f <- function() g() # line 1 g <- function() h() # line 2 h <- function() expect_true(FALSE) # line 3 test_that("simple", { # line 4 h() # line 5 }) }) expect_equal(lines, 5) }) test_that("line numbers captured inside a loop", { lines <- srcref_line({ test_that("simple", { # line 1 for (i in 1:4) expect_true(TRUE) # line 2 }) }) expect_equal(lines, rep(2, 4)) }) test_that("line numbers captured for skip()s and stops()", { lines <- srcref_line({ test_that("simple", { # line 1 skip("Not this time") # line 2 }) # line 3 }) expect_equal(lines, 2) lines <- srcref_line({ test_that("simple", { # line 1 
stop("Not this time") # line 2 }) # line 3 }) expect_equal(lines, 2) }) test_that("line numbers captured for on.exit()", { lines <- srcref_line({ test_that("simple", { # line 1 on.exit({stop("Error")}) # line 2 }) # line 3 }) expect_equal(lines, 2) # Falls back to test if no srcrf lines <- srcref_line({ test_that("simple", { # line 1 on.exit(stop("Error")) # line 2 }) # line 3 }) expect_equal(lines, 1) }) testthat/tests/testthat/test-parallel.R0000644000176200001440000001311615054145645020023 0ustar liggesuserstest_that("detect number of cpus to use", { withr::local_options(Ncpus = 100L) withr::local_envvar(TESTTHAT_CPUS = NA) expect_equal(default_num_cpus(), 100L) withr::local_options(Ncpus = 100L) withr::local_envvar(TESTTHAT_CPUS = 10) expect_equal(default_num_cpus(), 100L) withr::local_options(list(Ncpus = NULL)) withr::local_envvar(TESTTHAT_CPUS = NA) expect_equal(default_num_cpus(), 2L) withr::local_options(list(Ncpus = NULL)) withr::local_envvar(TESTTHAT_CPUS = NA) expect_equal(default_num_cpus(), 2L) withr::local_options(list(Ncpus = NULL)) withr::local_envvar(TESTTHAT_CPUS = 13) expect_equal(default_num_cpus(), 13L) }) test_that("good error if bad option", { withr::local_options(Ncpus = "bad") expect_snapshot(default_num_cpus(), error = TRUE) }) test_that("ok", { skip_on_covr() withr::local_envvar(c(TESTTHAT_PARALLEL = "TRUE")) # we cannot run these with the silent reporter, because it is not # parallel compatible, and they'll not run in parallel capture.output(suppressMessages( ret <- test_local( test_path("test-parallel", "ok"), reporter = "summary", stop_on_failure = FALSE ) )) tdf <- as.data.frame(ret) tdf <- tdf[order(tdf$file), ] expect_equal(tdf$failed, c(0, 1, 0)) expect_equal(tdf$skipped, c(FALSE, FALSE, TRUE)) }) test_that("fail", { skip_on_covr() withr::local_envvar(c(TESTTHAT_PARALLEL = "TRUE")) # we cannot run these with the silent reporter, because it is not # parallel compatible, and they'll not run in parallel 
capture.output(suppressMessages( ret <- test_local( test_path("test-parallel", "fail"), reporter = "summary", stop_on_failure = FALSE ) )) tdf <- as.data.frame(ret) tdf <- tdf[order(tdf$file), ] expect_equal(tdf$failed, c(1)) }) test_that("snapshots", { skip_on_covr() skip_on_cran() withr::local_envvar(c(TESTTHAT_PARALLEL = "TRUE")) tmp <- withr::local_tempdir("testthat-snap-") file.copy(test_path("test-parallel", "snap"), tmp, recursive = TRUE) # we cannot run these with the silent reporter, because it is not # parallel compatible, and they'll not run in parallel capture.output(suppressMessages( ret <- test_local( file.path(tmp, "snap"), reporter = "summary", stop_on_failure = FALSE ) )) tdf <- as.data.frame(ret) tdf <- tdf[order(tdf$file), ] expect_equal(tdf$failed, c(0, 0, 1)) snaps <- file.path(tmp, "snap", "tests", "testthat", "_snaps") expect_true(file.exists(file.path(snaps, "snap-1.md"))) expect_true(file.exists(file.path(snaps, "snap-2.md"))) expect_true(file.exists(file.path(snaps, "snap-3.md"))) }) test_that("new snapshots are added", { skip_on_covr() skip_on_cran() withr::local_envvar(c(TESTTHAT_PARALLEL = "TRUE", CI = "false")) tmp <- withr::local_tempdir("testthat-snap-") file.copy(test_path("test-parallel", "snap"), tmp, recursive = TRUE) unlink(file.path(tmp, "snap", "tests", "testthat", "_snaps", "snap-2.md")) # we cannot run these with the silent reporter, because it is not # parallel compatible, and they'll not run in parallel capture.output(suppressMessages( ret <- test_local( file.path(tmp, "snap"), reporter = "summary", stop_on_failure = FALSE ) )) tdf <- as.data.frame(ret) tdf <- tdf[order(tdf$file), ] expect_equal(tdf$failed, c(0, 0, 1)) snaps <- file.path(tmp, "snap", "tests", "testthat", "_snaps") expect_true(file.exists(file.path(snaps, "snap-1.md"))) expect_true(file.exists(file.path(snaps, "snap-2.md"))) expect_true(file.exists(file.path(snaps, "snap-3.md"))) }) test_that("snapshots are removed if test file has no snapshots", { 
skip_on_covr() skip_on_cran() withr::local_envvar(c(TESTTHAT_PARALLEL = "TRUE")) tmp <- withr::local_tempdir("testthat-snap-") file.copy(test_path("test-parallel", "snap"), tmp, recursive = TRUE) writeLines( "test_that(\"2\", { expect_true(TRUE) })", file.path(tmp, "snap", "tests", "testthat", "test-snap-2.R") ) # we cannot run these with the silent reporter, because it is not # parallel compatible, and they'll not run in parallel capture.output(suppressMessages( ret <- test_local( file.path(tmp, "snap"), reporter = "summary", stop_on_failure = FALSE ) )) tdf <- as.data.frame(ret) tdf <- tdf[order(tdf$file), ] expect_equal(tdf$failed, c(0, 0, 1)) snaps <- file.path(tmp, "snap", "tests", "testthat", "_snaps") expect_true(file.exists(file.path(snaps, "snap-1.md"))) expect_false(file.exists(file.path(snaps, "snap-2.md"))) expect_true(file.exists(file.path(snaps, "snap-3.md"))) }) test_that("snapshots are removed if test file is removed", { skip_on_covr() skip_on_cran() withr::local_envvar(c(TESTTHAT_PARALLEL = "TRUE")) withr::defer(unlink(tmp, recursive = TRUE)) dir.create(tmp <- tempfile("testthat-snap-")) file.copy(test_path("test-parallel", "snap"), tmp, recursive = TRUE) unlink(file.path(tmp, "snap", "tests", "testthat", "test-snap-2.R")) withr::local_envvar(CI = NA_character_) # we cannot run these with the silent reporter, because it is not # parallel compatible, and they'll not run in parallel capture.output(suppressMessages( ret <- test_local( file.path(tmp, "snap"), reporter = "summary", stop_on_failure = FALSE ) )) tdf <- as.data.frame(ret) tdf <- tdf[order(tdf$file), ] expect_equal(tdf$failed, c(0, 1)) snaps <- file.path(tmp, "snap", "tests", "testthat", "_snaps") expect_true(file.exists(file.path(snaps, "snap-1.md"))) expect_false(file.exists(file.path(snaps, "snap-2.md"))) expect_true(file.exists(file.path(snaps, "snap-3.md"))) }) testthat/tests/testthat/test-teardown/0000755000176200001440000000000015130664352017721 5ustar 
liggesuserstestthat/tests/testthat/test-teardown/test-teardown.R0000644000176200001440000000026614313316267022651 0ustar liggesuserslocal_edition(2) setup(brio::write_lines("test", "teardown.txt")) teardown(file.remove("teardown.txt")) test_that("file is created", { expect_true(file.exists("teardown.txt")) }) testthat/tests/testthat/test-local.R0000644000176200001440000000343415047715224017321 0ustar liggesuserstest_that("local context is 'as promised' inside test_that()", { # high-level expectations or expectations that preceded testthat 3e expect_true(is_testing()) expect_equal(testing_package(), "testthat") expect_false(is_interactive()) expect_equal(Sys.getenv("R_TESTS"), "") # set in local_test_context() expect_equal(Sys.getenv("TESTTHAT"), "true") # testthat 3e, set in local_reproducible_output() expect_equal(edition_get(), 3L) expect_equal(getOption("width"), 80) expect_false(getOption("crayon.enabled")) expect_false(getOption("cli.dynamic")) expect_false(getOption("cli.unicode")) expect_equal(getOption("lifecycle_verbosity"), "warning") expect_equal(getOption("OutDec"), ".") expect_false(getOption("rlang_interactive")) expect_false(getOption("useFancyQuotes")) expect_equal(getOption("max.print"), 99999) expect_equal(Sys.getenv("RSTUDIO"), "") expect_equal(Sys.getenv("LANGUAGE"), "C") expect_equal(Sys.getlocale("LC_COLLATE"), "C") }) test_that("can override usual options", { local_test_directory(tempdir(), "methods") expect_equal(testing_package(), "methods") }) test_that("can override translation of error messages", { skip_on_cran() skip_on_os("linux") local_reproducible_output(lang = "fr") expect_error(mean[[1]], "objet de type") local_reproducible_output(lang = "es") expect_error(mean[[1]], "objeto de tipo") }) test_that("can force cli to display RStudio style hyperlinks", { expect_snapshot({ str(cli::ansi_hyperlink_types()) }) local_reproducible_output(crayon = TRUE, hyperlinks = TRUE, rstudio = TRUE) expect_snapshot({ str(cli::ansi_hyperlink_types()) }) 
}) test_that("browser() usages are errors in tests", { skip_if(getRversion() < "4.3.0") if (!interactive()) { expect_error(browser()) } }) testthat/tests/testthat/test-snapshot-file-snaps.R0000644000176200001440000000442315045650711022121 0ustar liggesuserstest_that("append manages current snapshot index", { path <- withr::local_tempdir() snaps <- FileSnaps$new(path, "file") i <- snaps$append("test1", "_default", 1) expect_equal(i, 1) i <- snaps$append("test1", "_default", 2) expect_equal(i, 2) i <- snaps$append("test1", "windows", 3) expect_equal(i, 1) i <- snaps$append("test1", "windows", 3) expect_equal(i, 2) i <- snaps$append("test2", "_default", 3) expect_equal(i, 1) }) test_that("can retrieve appended snaps", { snaps <- FileSnaps$new(withr::local_tempdir(), "file") snaps$append("test1", "_default", "1") snaps$append("test1", "_default", "2") snaps$append("test2", "_default", "3") snaps$append("test2", "windows", "4") expect_equal(snaps$get("test1", "_default", 2), "2") expect_equal(snaps$get("test2", "_default", 1), "3") expect_equal(snaps$get("test2", "windows", 1), "4") # Returns NULL if don't exist expect_equal(snaps$get("MISSING", "_default", 1), NULL) expect_equal(snaps$get("test1", "MISSING", 1), NULL) expect_equal(snaps$get("test1", "_default", 100), NULL) }) test_that("can reset snapshots", { snaps1 <- FileSnaps$new(withr::local_tempdir(), "file") snaps2 <- FileSnaps$new(withr::local_tempdir(), "file") snaps1$append("test1", "_default", "1") snaps1$append("test1", "_default", "2") snaps1$append("test2", "_default", "3") snaps2$append("test1", "_default", "4") snaps2$reset("test1", snaps1) expect_equal(snaps2$snaps$`_default`$test1, c("4", "2")) # And can copy complete snapshot snaps2$reset("test2", snaps1) expect_equal(snaps2$snaps$`_default`$test2, "3") # And shouldn't change if we reset again snaps2$reset("test1", snaps1) expect_equal(snaps2$snaps$`_default`$test1, c("4", "2")) }) test_that("can round trip cur to old snaps", { path <- 
withr::local_tempdir() cur <- FileSnaps$new(path, "file", "cur") cur$append("test1", "_default", "1") cur$append("test2", "_default", "2") cur$append("test2", "windows", "3") cur$write() old <- FileSnaps$new(path, "file", "old") expect_equal(cur$snaps, old$snaps) }) test_that("snaps delete default variant if no snaps", { snaps <- FileSnaps$new(withr::local_tempdir(), "file", "cur") brio::write_lines("x", snaps$path()) snaps$write() expect_false(file.exists(snaps$path())) }) testthat/tests/testthat/test-expect-named.R0000644000176200001440000000322215072252215020566 0ustar liggesuserstest_that("expected_named verifies presence of names", { expect_success(expect_named(c(a = 1))) x <- 1:10 expect_snapshot_failure(expect_named(x)) }) test_that("expected_named verifies actual of names", { expect_success(expect_named(c(a = 1), "a")) x <- c(a = 1) expect_snapshot_failure(expect_named(x, "b")) }) test_that("always returns inputs", { x <- c(a = 1) expect_equal(expect_named(x), x) expect_equal(expect_named(x, "a"), x) expect_equal(expect_named(x, "a", ignore.order = TRUE), x) }) test_that("expected_named optionally ignores order and case", { x <- c(a = 1, b = 2) expect_success(expect_named(x, c("A", "B"), ignore.case = TRUE)) expect_success(expect_named(x, c("b", "a"), ignore.order = TRUE)) }) test_that("provide useful feedback on failure", { x1 <- c(a = 1) x2 <- c(a = 1, b = 2) expect_snapshot_failure(expect_named(x1, c("a", "b"), ignore.order = TRUE)) expect_snapshot_failure(expect_named(x2, "a", ignore.order = TRUE)) expect_snapshot_failure(expect_named(x1, "b", ignore.order = TRUE)) expect_snapshot_failure(expect_named(x1, c("a", "b"), ignore.order = FALSE)) expect_snapshot_failure(expect_named(x2, "a", ignore.order = FALSE)) expect_snapshot_failure(expect_named(x1, "b", ignore.order = FALSE)) }) test_that("expect_named validates its inputs", { expect_snapshot(error = TRUE, { expect_named(c(a = 1), "a", ignore.order = "yes") expect_named(c(a = 1), "a", ignore.case = 
"yes") }) }) test_that("expect_named accepts glue for 'expected'", { n <- structure( c("v1", "v2", "v3", "v4", "v5"), class = c("glue", "character") ) v <- set_names(1:5, n) expect_named(v, n) }) testthat/tests/testthat/test-expect-comparison.R0000644000176200001440000000560515104404205021655 0ustar liggesuserstest_that("basic comparisons work", { x <- 10 expect_success(expect_lt(x, 11)) expect_snapshot_failure(expect_lt(x, 10)) expect_success(expect_lte(x, 10)) expect_success(expect_gt(11, 10)) expect_snapshot_failure(expect_gt(x, 10)) expect_success(expect_gte(x, 10)) }) test_that("useful output when numbers are very small", { x <- 1e-5 expect_snapshot_failure(expect_lte(1.1 * x, x)) expect_snapshot_failure(expect_gt(x, 1.1 * x)) }) test_that("useful output when difference is zero", { x <- 100 expect_snapshot_failure(expect_lt(x, 100)) }) test_that("useful output when differnce is large", { x <- 100 expect_snapshot_failure(expect_lt(x, 0.001)) }) test_that("comparison result object invisibly", { out <- expect_invisible(expect_lt(1, 10)) expect_equal(out, 1) }) test_that("comparisons with Inf work", { expect_success(expect_lt(10, Inf)) expect_failure(expect_lt(Inf, Inf)) expect_success(expect_lte(Inf, Inf)) expect_success(expect_gt(Inf, 10)) expect_failure(expect_gt(Inf, Inf)) expect_success(expect_gte(Inf, Inf)) x <- Inf expect_snapshot_failure(expect_lt(x, Inf)) }) test_that("comparisons with NA work", { expect_failure(expect_lt(10, NA_real_)) expect_failure(expect_lt(NA_real_, 10)) expect_failure(expect_lt(NA_real_, NA_real_)) expect_failure(expect_lte(NA_real_, NA_real_)) expect_failure(expect_gt(10, NA_real_)) expect_failure(expect_gt(NA_real_, 10)) expect_failure(expect_gt(NA_real_, NA_real_)) expect_failure(expect_gte(NA_real_, NA_real_)) x <- NA_real_ expect_snapshot_failure(expect_lt(x, 10)) }) test_that("comparisons with negative numbers work", { expect_success(expect_lt(-5, -2)) expect_snapshot_failure(expect_gt(-5, -2)) }) test_that("comparisons with 
POSIXct objects work", { time <- as.POSIXct("2020-01-01 01:00:00") time2 <- time + 1.5 expect_success(expect_lt(time, time2)) # set digits.secs = 1 to ensure consistent output with older R versions withr::with_options(c(digits.secs = 1), { expect_snapshot_failure(expect_lt(time2, time)) }) }) test_that("comparisons with Date objects work", { date <- as.Date("2020-01-01") date2 <- date + 1 expect_success(expect_gt(date2, date)) expect_success(expect_gte(date2, date)) expect_snapshot_failure(expect_gt(date, date2)) }) test_that("comparisons of date/time with NA work", { time <- as.POSIXct("2020-01-01 01:00:00") date <- as.Date("2020-01-01") expect_failure(expect_lt(time, NA)) expect_failure(expect_gt(date, NA)) }) test_that("comparisons with character objects work", { expect_success(expect_lte("a", "b")) expect_snapshot_failure(expect_lte("b", "a")) }) test_that("comparison must yield a single logical", { expect_snapshot(error = TRUE, expect_lt(1:10, 5)) }) test_that("wordy versions are deprecated", { expect_warning(expect_less_than(1, 2), "Deprecated") expect_warning(expect_more_than(2, 1), "Deprecated") }) testthat/tests/testthat/test-error/0000755000176200001440000000000015127731052017225 5ustar liggesuserstestthat/tests/testthat/test-error/test-error.R0000644000176200001440000000006714313316267021464 0ustar liggesuserstest_that("should fail", { expect_equal(1 + 1, 3) }) testthat/tests/testthat/test-verify-conditions.txt0000644000176200001440000000102215127731052022303 0ustar liggesusers> message("Message") Message: Message > # With calls > warning("Warning") Warning: Warning > stop("Error") Error: Error > # Without calls > warning("Warning", call. = FALSE) Warning: Warning > stop("Error", call. = FALSE) Error: Error > # With `conditionMessage()` method > cnd_signal(message_cnd("foobar", message = "Message")) Message: Dispatched! Message > cnd_signal(warning_cnd("foobar", message = "Warning")) Warning: Dispatched! 
Warning > cnd_signal(error_cnd("foobar", message = "Error")) Error: Dispatched! Error testthat/tests/testthat/test-reporter-debug.R0000644000176200001440000001165515111023111021134 0ustar liggesuserstest_that("produces consistent output", { withr::local_options(testthat.edition_ignore = TRUE) local_edition(2) local_mocked_bindings( show_menu = function(choices, title = NULL) { cat( paste0(format(seq_along(choices)), ": ", choices, sep = "\n"), "\n", sep = "" ) 0L }, sink_number = function() 0L ) withr::local_options(testthat_format_srcrefs = FALSE) expect_snapshot_reporter(DebugReporter$new()) }) get_vars_from_debug_reporter <- function(choice, fun, envir = parent.frame()) { frame <- get_frame_from_debug_reporter(choice, fun, envir) ls(frame) } get_frame_from_debug_reporter <- function(choice, fun, envir = parent.frame()) { local_edition(2) force(choice) test_debug_reporter_parent_frame <- NULL with_mocked_bindings( show_menu = function(choices, title = NULL) { # if (choice > 0) print(choices) my_choice <- choice choice <<- 0L my_choice }, browse_frame = function(frame, skip) { test_debug_reporter_parent_frame <<- frame }, sink_number = function() 0L, with_reporter( "debug", test_that("debug_reporter_test", { fun() }) ) ) test_debug_reporter_parent_frame } success_fun <- function() { aa <- 1 expect_true(TRUE) } test_that("debug reporter is not called for successes", { expect_null(get_frame_from_debug_reporter(2, success_fun)) }) test_that("browser() is called for the correct frame for failures", { fun_1 <- function() { aa <- 1 expect_true(FALSE) } fun_2 <- function() { f <- function() expect_true(FALSE) f() } fun_3 <- function() { f <- function() { g <- function() expect_true(FALSE) g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") 
expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for warnings", { fun_1 <- function() { aa <- 1 warning("warn") } fun_2 <- function() { f <- function() warning("warn") f() } fun_3 <- function() { f <- function() { g <- function() warning("warn") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for the correct frame for errors", { fun_1 <- function() { aa <- 1 stop("error") } fun_2 <- function() { f <- function() stop("error") f() } fun_3 <- function() { f <- function() { g <- function() stop("error") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) test_that("browser() is called for 
the correct frame for skips", { fun_1 <- function() { aa <- 1 skip("skip") } fun_2 <- function() { f <- function() skip("skip") f() } fun_3 <- function() { f <- function() { g <- function() skip("skip") g() } f() } expect_equal(get_vars_from_debug_reporter(1, fun_1), character()) expect_equal(get_vars_from_debug_reporter(2, fun_1), "aa") expect_equal(get_vars_from_debug_reporter(1, fun_2), character()) expect_equal(get_vars_from_debug_reporter(2, fun_2), "f") expect_equal(get_vars_from_debug_reporter(3, fun_2), character()) expect_equal(get_vars_from_debug_reporter(1, fun_3), character()) expect_equal(get_vars_from_debug_reporter(2, fun_3), "f") expect_equal(get_vars_from_debug_reporter(3, fun_3), "g") expect_equal(get_vars_from_debug_reporter(4, fun_3), character()) }) testthat/tests/testthat/test-try-again.R0000644000176200001440000000111715104404205020102 0ustar liggesuserssucceed_after <- function(i) { function() { i <<- i - 1 expect_equal(i, 0) } } test_that("tries multiple times", { third_try <- succeed_after(3) expect_snapshot(result <- try_again(3, third_try())) expect_equal(result, 0) third_try <- succeed_after(3) expect_snapshot(try_again(1, third_try()), error = TRUE) }) test_that("handles errors", { fails_twice <- local({ i <- 0 function() { i <<- i + 1 if (i <= 2) stop("fail") else 1 } }) expect_snapshot(result <- try_again(3, expect_equal(fails_twice(), 1))) expect_equal(result, 1) }) testthat/tests/testthat/test-verify-output.txt0000644000176200001440000000052615127731051021501 0ustar liggesusers> # Output > 1 + 2 [1] 3 > invisible(1:10) > 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + 12345678 + + 12345678 + 12345678 + 12345678 + 12345678 [1] 135802458 Header ====== > # Other output > letters [1] "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" [20] "t" "u" "v" "w" "x" "y" "z" testthat/tests/testthat/test-reporter-rstudio.R0000644000176200001440000000013214164710003021535 0ustar 
liggesuserstest_that("reporter basics works", { expect_snapshot_reporter(RStudioReporter$new()) }) testthat/tests/testthat/_snaps/0000755000176200001440000000000015127561732016410 5ustar liggesuserstestthat/tests/testthat/_snaps/expect-no-condition.md0000644000176200001440000000471315127731047022623 0ustar liggesusers# expect_no_* conditions behave as expected Code expect_no_error(stop("error")) Condition Error: ! Expected `stop("error")` not to throw any errors. Actually got a with message: error --- Code expect_no_warning(warning("warning")) Condition Error: ! Expected `warning("warning")` not to throw any warnings. Actually got a with message: warning --- Code expect_no_message(message("message")) Condition Error: ! Expected `message("message")` not to throw any messages. Actually got a with message: message --- Code expect_no_error(abort("error")) Condition Error: ! Expected `abort("error")` not to throw any errors. Actually got a with message: error --- Code expect_no_warning(warn("warning")) Condition Error: ! Expected `warn("warning")` not to throw any warnings. Actually got a with message: warning --- Code expect_no_message(inform("message")) Condition Error: ! Expected `inform("message")` not to throw any messages. Actually got a with message: message # expect_no_* don't emit success when they fail Code expect_no_error(stop("!")) Condition Error: ! Expected `stop("!")` not to throw any errors. Actually got a with message: ! # matched conditions give informative message Code expect_no_warning(foo()) Condition Error: ! Expected `foo()` not to throw any warnings. Actually got a with message: This is a problem! Code expect_no_warning(foo(), message = "problem") Condition Error: ! Expected `foo()` not to throw any warnings matching pattern 'problem'. Actually got a with message: This is a problem! Code expect_no_warning(foo(), class = "test") Condition Error: ! Expected `foo()` not to throw any warnings of class 'test'. 
Actually got a with message: This is a problem! Code expect_no_warning(foo(), message = "problem", class = "test") Condition Error: ! Expected `foo()` not to throw any warnings of class 'test' matching pattern 'problem'. Actually got a with message: This is a problem! testthat/tests/testthat/_snaps/reporter.md0000644000176200001440000000013215127731051020561 0ustar liggesusers# can control output with file arg/option [1] ".FFEESSWS" --- [1] ".FFEESSWS" testthat/tests/testthat/_snaps/R4.3/0000755000176200001440000000000015047110120017014 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.3/snapshot-file/0000755000176200001440000000000015047110120021570 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.3/snapshot-file/version.txt0000644000176200001440000000000515047110120024011 0ustar liggesusersR4.3 testthat/tests/testthat/_snaps/R4.3/snapshot.md0000644000176200001440000000013215047110120021171 0ustar liggesusers# variants save different values Code r_version() Output [1] "R4.3" testthat/tests/testthat/_snaps/verify-output.md0000644000176200001440000000022315127731052021563 0ustar liggesusers# can't record plots Code verify_output(tempfile(), plot(1:10)) Condition Error in `FUN()`: ! Plots are not supported. 
testthat/tests/testthat/_snaps/R4.1/0000755000176200001440000000000015047110120017012 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.1/snapshot-file/0000755000176200001440000000000015047110120021566 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.1/snapshot-file/version.txt0000644000176200001440000000000515047110120024007 0ustar liggesusersR4.1 testthat/tests/testthat/_snaps/R4.1/snapshot.md0000644000176200001440000000013215047110120021167 0ustar liggesusers# variants save different values Code r_version() Output [1] "R4.1" testthat/tests/testthat/_snaps/test-state.md0000644000176200001440000000111715127731051021020 0ustar liggesusers# set_state_inspector() verifies its inputs Code set_state_inspector(function(x) 123) Condition Error in `set_state_inspector()`: ! `callback` must be a zero-arg function, or NULL # can detect state changes [ FAIL 0 | WARN 1 | SKIP 0 | PASS 1 ] == Warnings ==================================================================== -- Warning ('reporters/state-change.R:1:1'): options --------------------------- Global state has changed: `before$x` is NULL `after$x` is a double vector (1) [ FAIL 0 | WARN 1 | SKIP 0 | PASS 1 ] testthat/tests/testthat/_snaps/R4.4/0000755000176200001440000000000015047715224017035 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.4/snapshot-file/0000755000176200001440000000000015047715224021611 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.4/snapshot-file/version.txt0000644000176200001440000000000515047715224024032 0ustar liggesusersR4.4 testthat/tests/testthat/_snaps/R4.4/snapshot.md0000644000176200001440000000013215047715224021212 0ustar liggesusers# variants save different values Code r_version() Output [1] "R4.4" testthat/tests/testthat/_snaps/skip.md0000644000176200001440000000375115127731051017677 0ustar liggesusers# basic skips work as expected Reason: Skipping --- Reason: TRUE is TRUE --- Reason: FALSE is not TRUE --- Reason: empty test # autogenerated message is always single line Reason: 
a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name || a_very_long_argument_name is not TRUE # skip_if_not_installed() works as expected, absence and version Reason: {doesntexist} is not installed --- Reason: Installed testthat is version 3.0.0; but 9999.9999.999 is required # skip_if_not_installed() works as expected, offline Reason: offline # skip_on_ci() works as expected Reason: On CI # skip_on_covr() works as expected Reason: On covr # skip_on_bioc() works as expected Reason: On Bioconductor # superseded CI skips still work Reason: On Travis --- Reason: On Appveyor # skip_if_translated() works as expected Reason: "'%s' not found" is translated # skip on os checks os names Code skip_on_os("amiga") Condition Error in `match.arg()`: ! 'arg' should be one of "windows", "mac", "linux", "solaris", "emscripten" # can skip on multiple oses Reason: On Windows --- Reason: On Windows # can refine os with arch Reason: On Windows --- Reason: On Windows i386 # skip_unless_r works as expected Code skip_unless_r("idfjdij") Condition Error in `skip_unless_r()`: ! `spec` must be an valid version specification, like ">= 4.0.0", not "idfjdij". 
# skip_unless_r gives the expected output Reason: Current R version (4.5.0) does not satisfy requirement (>= 999.999.999) --- Reason: Current R version (4.5.0) does not satisfy requirement (== 0.0.0) testthat/tests/testthat/_snaps/extract.md0000644000176200001440000000343415127731050020400 0ustar liggesusers# can extract test from file Code base::writeLines(readLines(out_path)) Output # Extracted from extract/simple.R:3 # setup ------------------------------------------------------------------------ library(testthat) test_env <- simulate_test_env(package = "testthat", path = "..") attach(test_env, warn.conflicts = FALSE) # test ------------------------------------------------------------------------- expect_true(TRUE) # can include test env setup Code base::writeLines(extract_test_lines(exprs, 2, "test")) Output # setup ------------------------------------------------------------------------ library(testthat) test_env <- simulate_test_env(package = "test", path = "..") attach(test_env, warn.conflicts = FALSE) # test ------------------------------------------------------------------------- expect_true(TRUE) # can extract prequel Code base::writeLines(extract_test_lines(exprs, 4)) Output # prequel ---------------------------------------------------------------------- x <- 1 y <- 2 # test ------------------------------------------------------------------------- expect_true(TRUE) # preserves code format but not comments Code base::writeLines(extract_test_lines(exprs, 3)) Output # prequel ---------------------------------------------------------------------- 1 + 1 # test ------------------------------------------------------------------------- 2 + 2 # can extract selected expectation Code base::writeLines(extract_test_lines(exprs, 2)) Output # test ------------------------------------------------------------------------- expect_true(TRUE) testthat/tests/testthat/_snaps/parallel.md0000644000176200001440000000021315127731051020513 0ustar liggesusers# good error if bad 
option Code default_num_cpus() Condition Error: ! `getOption('Ncpus')` must be an integer. testthat/tests/testthat/_snaps/local.md0000644000176200001440000000107715127731050020021 0ustar liggesusers# can force cli to display RStudio style hyperlinks Code str(cli::ansi_hyperlink_types()) Output List of 4 $ href : logi FALSE $ run : logi FALSE $ help : logi FALSE $ vignette: logi FALSE --- Code str(cli::ansi_hyperlink_types()) Output List of 4 $ href : logi TRUE $ run : logi TRUE ..- attr(*, "type")= chr "rstudio" $ help : logi TRUE ..- attr(*, "type")= chr "rstudio" $ vignette: logi TRUE ..- attr(*, "type")= chr "rstudio" testthat/tests/testthat/_snaps/reporter-debug.md0000644000176200001440000000075715127731050021661 0ustar liggesusers# produces consistent output 1: expect_true(x) 2: expect_waldo_constant_(act, exp, info = info, ignore_attr = TRUE) 3: fail(msg, info = info, trace_env = trace_env) 1: f() 2: expect_true(FALSE) 3: expect_waldo_constant_(act, exp, info = info, ignore_attr = TRUE) 4: fail(msg, info = info, trace_env = trace_env) 1: stop("stop") 1: f() 2: g() 3: h() 4: stop("!") 1: skip("skip") 1: f() 2: warning("def") testthat/tests/testthat/_snaps/expect-comparison.md0000644000176200001440000000467315127731047022402 0ustar liggesusers# basic comparisons work Code expect_lt(x, 10) Condition Error: ! Expected `x` < 10. Actual comparison: 10.0 >= 10.0 Difference: 0.0 >= 0 --- Code expect_gt(x, 10) Condition Error: ! Expected `x` > 10. Actual comparison: 10.0 <= 10.0 Difference: 0.0 <= 0 # useful output when numbers are very small Code expect_lte(1.1 * x, x) Condition Error: ! Expected `1.1 * x` <= `x`. Actual comparison: 0.0000110 > 0.0000100 Difference: 0.0000010 > 0 --- Code expect_gt(x, 1.1 * x) Condition Error: ! Expected `x` > `1.1 * x`. Actual comparison: 0.0000100 <= 0.0000110 Difference: -0.0000010 <= 0 # useful output when difference is zero Code expect_lt(x, 100) Condition Error: ! Expected `x` < 100. 
Actual comparison: 100.0 >= 100.0 Difference: 0.0 >= 0 # useful output when differnce is large Code expect_lt(x, 0.001) Condition Error: ! Expected `x` < 0.001. Actual comparison: 100.000 >= 0.001 Difference: 99.999 >= 0 # comparisons with Inf work Code expect_lt(x, Inf) Condition Error: ! Expected `x` < Inf. Actual comparison: Inf >= Inf # comparisons with NA work Code expect_lt(x, 10) Condition Error: ! Expected `x` < 10. Actual comparison: NA >= 10.0 # comparisons with negative numbers work Code expect_gt(-5, -2) Condition Error: ! Expected `-5` > `-2`. Actual comparison: -5.0 <= -2.0 Difference: -3.0 <= 0 # comparisons with POSIXct objects work Code expect_lt(time2, time) Condition Error: ! Expected `time2` < `time`. Actual comparison: "2020-01-01 01:00:01.5" >= "2020-01-01 01:00:00" Difference: 1.5 secs >= 0 secs # comparisons with Date objects work Code expect_gt(date, date2) Condition Error: ! Expected `date` > `date2`. Actual comparison: "2020-01-01" <= "2020-01-02" Difference: -1.0 days <= 0 days # comparisons with character objects work Code expect_lte("b", "a") Condition Error: ! Expected "b" <= "a". Actual comparison: "b" > "a" # comparison must yield a single logical Code expect_lt(1:10, 5) Condition Error in `expect_lt()`: ! Result of comparison must be `TRUE`, `FALSE`, or `NA` testthat/tests/testthat/_snaps/examples.md0000644000176200001440000000023515127731046020545 0ustar liggesusers# test_examples fails if no examples Code test_examples("asdf") Condition Error in `test_examples()`: ! Could not find examples. testthat/tests/testthat/_snaps/expect-vector.md0000644000176200001440000000071115127731050021511 0ustar liggesusers# basic properties upheld Code expect_vector(x, size = 5) Condition Error: ! `x` must have size 5, not size 10. --- Code expect_vector(y) Condition Error: ! `y` must be a vector, not `NULL`. # expect_vector validates its inputs Code expect_vector(1:5, size = "large") Condition Error in `expect_vector()`: ! 
`size` must be a whole number or `NULL`, not the string "large". testthat/tests/testthat/_snaps/snapshot-file/0000755000176200001440000000000015130664352021160 5ustar liggesuserstestthat/tests/testthat/_snaps/snapshot-file/foo-not-binary.csv0000644000176200001440000000025715054053615024543 0ustar liggesusers"","mpg","cyl","disp","hp" "Mazda RX4",21,6,160,110 "Mazda RX4 Wag",21,6,160,110 "Datsun 710",22.8,4,108,93 "Hornet 4 Drive",21.4,6,258,110 "Hornet Sportabout",18.7,8,360,175 testthat/tests/testthat/_snaps/snapshot-file/foo.png0000644000176200001440000000542715047135475022467 0ustar liggesusersPNG  IHDR,,N~GPLTE888999:::;;;<<<===>>>???@@@AAABBBCCCDDDEEEFFFGGGHHHIIIJJJKKKLLLMMMNNNOOOPPPQQQRRRSSSTTTUUUVVVWWWXXXYYY[[[\\\]]]^^^___bbbcccdddeeefffggghhhjjjkkklllmmmnnnooopppqqqrrrssstttuuuvvvxxxzzz{{{|||}}}~~~^IDATxEM"" vQQQ@E,Q,XPؐ4{EkCT eg>df|⺷74XD, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"j"1zm䟮7?^o\mEIL!V6+{? h,X`Z8bXk䟮9VPuQ~VX6ck>9`w_FX]Xڶ_౅=Q#ii!jyƱ|y1j5:ڝc`O?.X m|`qVױJ븋u>JM\Qj>H]nwkmuqAոڗc`uV6X#)r QUV-uN?@5nGAu~m闏1vOXO:3\Z[ }jĚba#5r>wجzXK+Uxu%X=-zHja`}!5tZV7\A^j91e:u~Ag?K=Du>ѹ"JYl C߹>_-c7I’˹rk5bycP1cb}U1cp:aXϻX9?XXoXߕ:aXږ3Vcu[Ej3+yNE=]\X݇{R[)}N|ƤmqsTskGkW?޷e_:[4XNSO!]Hz UD>HX)Z$X&P7+Ǎ ˈzDaq`KVXE$X8X*"i"_,XG:iƃ͗YCXޑHŝ',DTgw~i31RbHoQ(hai'^PaΨ^Pap^`a;9{asi/i&9Xb8.8V8K g%9{5|+4XpX[,~.8,K{Ix˲ZjtVk/'UX}t R:W/d\{gᾭh-iX4'd7w ַ.> a>J딋k>s#i2t~NXXL+;oVU:_oϽ&i݊ZN)z?2N~Y)~ܰ)T0 }쳝\YXUYXU?wĚ%@HlU&UWSpYV)b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D, b X@"D,ccL^IENDB`testthat/tests/testthat/_snaps/snapshot-file/foo.r0000644000176200001440000000006415053663502022126 0ustar liggesusersa b c d e f g h i j k l m n o p q r s t u v w x y z testthat/tests/testthat/_snaps/snapshot-file/secret.txt0000644000176200001440000000004315047110120023165 0ustar liggesusers ssh squirrel testthat/tests/testthat/_snaps/snapshot-file/a.txt0000644000176200001440000000000315044170706022131 0ustar liggesusersa 
testthat/tests/testthat/_snaps/snapshot-file/foo.csv0000644000176200001440000000025715054053615022463 0ustar liggesusers"","mpg","cyl","disp","hp" "Mazda RX4",21,6,160,110 "Mazda RX4 Wag",21,6,160,110 "Datsun 710",22.8,4,108,93 "Hornet 4 Drive",21.4,6,258,110 "Hornet Sportabout",18.7,8,360,175 testthat/tests/testthat/_snaps/reporter-junit.md0000644000176200001440000000545415127731050021723 0ustar liggesusers# reporter doesn't change without warning Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) Error in `eval(code, test_env)`: stop Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() def Backtrace: x 1. \-f() testthat/tests/testthat/_snaps/try-again.md0000644000176200001440000000114515127731052020620 0ustar liggesusers# tries multiple times Code result <- try_again(3, third_try()) Message i Expectation failed; trying again (1)... i Expectation failed; trying again (2)... --- Code try_again(1, third_try()) Message i Expectation failed; trying again (1)... Condition Error: ! Expected `i` to equal 0. Differences: `actual`: 1.0 `expected`: 0.0 # handles errors Code result <- try_again(3, expect_equal(fails_twice(), 1)) Message i Expectation errored; trying again (1)... i Expectation errored; trying again (2)... testthat/tests/testthat/_snaps/mock2.md0000644000176200001440000000061315127731050017735 0ustar liggesusers# with_mocked_bindings() validates its inputs Code with_mocked_bindings(1 + 1, function() 2) Condition Error in `local_mocked_bindings()`: ! All elements of `...` must be named. # can't mock bindings that don't exist Code local_mocked_bindings(f = function() "x") Condition Error in `local_mocked_bindings()`: ! 
Can't find binding for `f` testthat/tests/testthat/_snaps/reporter-list.md0000644000176200001440000000061115127731050021533 0ustar liggesusers# works in parallel Code results[, c(1:8, 12:13)] Output file context test nb failed skipped error warning passed result 1 f1 t11 2 0 FALSE FALSE 0 2 msg111, msg112 2 f2 t21 2 0 FALSE FALSE 0 2 msg211, msg212 3 f2 t22 1 0 TRUE FALSE 0 0 skip221 testthat/tests/testthat/_snaps/expect-silent.md0000644000176200001440000000027215127731050021507 0ustar liggesusers# generates useful failure message Code expect_silent(f()) Condition Error: ! Expected `f()` to run silently. Actual noise: output, warnings, messages. testthat/tests/testthat/_snaps/reporter-slow.md0000644000176200001440000000076415127731050021555 0ustar liggesusers# multiplication works Code show_all <- SlowReporter$new(min_time = 0) with_reporter(show_all, n_tests(10)) Output [-.--s] : run 1 [-.--s] : run 2 [-.--s] : run 3 [-.--s] : run 4 [-.--s] : run 5 [-.--s] : run 6 [-.--s] : run 7 [-.--s] : run 8 [-.--s] : run 9 [-.--s] : run 10 == Summary ===================================================================== All tests: -.--s Slow tests: -.--s testthat/tests/testthat/_snaps/edition.md0000644000176200001440000000074615127731046020371 0ustar liggesusers# checks its inputs Code local_edition("x") Condition Error in `local_edition()`: ! `x` must be a whole number, not the string "x". Code local_edition(5) Condition Error in `local_edition()`: ! `x` must be a whole number between 2 and 3, not the number 5. # deprecation only fired for newer edition Code edition_deprecate(3, "old stuff") Condition Warning: `old stuff` was deprecated in the 3rd edition. testthat/tests/testthat/_snaps/test-env.md0000644000176200001440000000026615127731051020474 0ustar liggesusers# is_snapshot() is true in snapshots Code is_snapshot() Output [1] TRUE --- true --- [1] TRUE --- Is snapshotting! --- Is snapshotting! 
testthat/tests/testthat/_snaps/snapshot-manage.md0000644000176200001440000000121315127731051022005 0ustar liggesusers# informs about files being accepted Code snapshot_accept(path = path) Message Updating snapshots: 'a.md' and 'test/b.txt'. # useful mesasge if no files to accept Code snapshot_accept(path = path) Message No snapshots to update. # can work with variants Code snapshot_accept(path = path) Message Updating snapshots: 'foo/a.md'. --- Code snapshot_accept("foo/a", path = path) Message Updating snapshots: 'foo/a.md'. # snapshot_reject deletes .new files Code snapshot_reject(path = path) Message Rejecting snapshots: * a.md * b.md testthat/tests/testthat/_snaps/reporter-teamcity.md0000644000176200001440000000563615127731051022414 0ustar liggesusers# reporter basics work ##teamcity[testSuiteStarted name='Successes'] ##teamcity[testSuiteStarted name='Success'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Success'] ##teamcity[testSuiteFinished name='Successes'] ##teamcity[testSuiteStarted name='Failures'] ##teamcity[testSuiteStarted name='Failure:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Expected `x` to be TRUE.' details='Differences:|n`actual`: FALSE|n`expected`: TRUE '] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Failure:1'] ##teamcity[testSuiteStarted name='Failure:2a'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Expected FALSE to be TRUE.' details='Differences:|n`actual`: FALSE|n`expected`: TRUE |n|nBacktrace:|n x|n 1. \-f()|n 2. 
\-testthat::expect_true(FALSE)'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Failure:2a'] ##teamcity[testSuiteFinished name='Failures'] ##teamcity[testSuiteStarted name='Errors'] ##teamcity[testSuiteStarted name='Error:1'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Error in `eval(code, test_env)`: stop' details=''] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='Error:1'] ##teamcity[testSuiteStarted name='errors get tracebacks'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFailed name='expectation 1' message='Error in `h()`: !' details='Backtrace:|n x|n 1. \-f()|n 2. \-g()|n 3. \-h()'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testSuiteFinished name='errors get tracebacks'] ##teamcity[testSuiteFinished name='Errors'] ##teamcity[testSuiteStarted name='Skips'] ##teamcity[testSuiteStarted name='explicit skips are reported'] ##teamcity[testIgnored name='expectation 1' message='Reason: skip'] ##teamcity[testSuiteFinished name='explicit skips are reported'] ##teamcity[testSuiteStarted name='empty tests are implicitly skipped'] ##teamcity[testIgnored name='expectation 1' message='Reason: empty test'] ##teamcity[testSuiteFinished name='empty tests are implicitly skipped'] ##teamcity[testSuiteFinished name='Skips'] ##teamcity[testSuiteStarted name='Warnings'] ##teamcity[testSuiteStarted name='warnings get backtraces'] ##teamcity[testStarted name='expectation 1'] ##teamcity[testFinished name='expectation 1'] ##teamcity[testIgnored name='expectation 2' message='Reason: empty test'] ##teamcity[testSuiteFinished name='warnings get backtraces'] ##teamcity[testSuiteFinished name='Warnings'] testthat/tests/testthat/_snaps/expect-invisible.md0000644000176200001440000000123415127731047022202 0ustar liggesusers# basic principles of visibility hold Code expect_invisible(x) Condition Error: ! Expected `x` to return invisibly. 
Actual visibility: visible. --- Code expect_visible(x <- 1) Condition Error: ! Expected `x <- 1` to return visibly. Actual visibility: invisible. # generates useful failure messages Code expect_visible(invisible(1)) Condition Error: ! Expected `invisible(1)` to return visibly. Actual visibility: invisible. --- Code expect_invisible(1) Condition Error: ! Expected 1 to return invisibly. Actual visibility: visible. testthat/tests/testthat/_snaps/reporter-minimal.md0000644000176200001440000000004715127731050022211 0ustar liggesusers# reporter as expected .FFEESSWS testthat/tests/testthat/_snaps/R4.2/0000755000176200001440000000000015047110120017013 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.2/snapshot-file/0000755000176200001440000000000015047110120021567 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.2/snapshot-file/version.txt0000644000176200001440000000000515047110120024010 0ustar liggesusersR4.2 testthat/tests/testthat/_snaps/R4.2/snapshot.md0000644000176200001440000000013215047110120021170 0ustar liggesusers# variants save different values Code r_version() Output [1] "R4.2" testthat/tests/testthat/_snaps/reporter-rstudio.md0000644000176200001440000000122215127731050022250 0ustar liggesusers# reporter basics works 'reporters/tests.R:13:3' [failure] Failure:1. Expected `x` to be TRUE. 'reporters/tests.R:17:8' [failure] Failure:2a. Expected FALSE to be TRUE. 'reporters/tests.R:24:3' [error] Error:1. Error in `eval(code, test_env)`: stop 'reporters/tests.R:30:8' [error] errors get tracebacks. Error in `h()`: ! 'reporters/tests.R:38:3' [skip] explicit skips are reported. Reason: skip 'reporters/tests.R:41:1' [skip] empty tests are implicitly skipped. Reason: empty test 'reporters/tests.R:47:5' [warning] warnings get backtraces. def 'reporters/tests.R:45:1' [skip] warnings get backtraces. 
Reason: empty test testthat/tests/testthat/_snaps/expect-named.md0000644000176200001440000000374115127731047021307 0ustar liggesusers# expected_named verifies presence of names Code expect_named(x) Condition Error: ! Expected `x` to have names. # expected_named verifies actual of names Code expect_named(x, "b") Condition Error: ! Expected `x` to have names "b". Differences: `actual`: "a" `expected`: "b" # provide useful feedback on failure Code expect_named(x1, c("a", "b"), ignore.order = TRUE) Condition Error: ! Expected `x1` to have names `c("a", "b")`. Actual: "a" Expected: "a", "b" Absent: "b" --- Code expect_named(x2, "a", ignore.order = TRUE) Condition Error: ! Expected `x2` to have names "a". Actual: "a", "b" Expected: "a" Needs: "b" --- Code expect_named(x1, "b", ignore.order = TRUE) Condition Error: ! Expected `x1` to have names "b". Actual: "a" Expected: "b" Needs: "a" Absent: "b" --- Code expect_named(x1, c("a", "b"), ignore.order = FALSE) Condition Error: ! Expected `x1` to have names `c("a", "b")`. Differences: `actual`: "a" `expected`: "a" "b" --- Code expect_named(x2, "a", ignore.order = FALSE) Condition Error: ! Expected `x2` to have names "a". Differences: `actual`: "a" "b" `expected`: "a" --- Code expect_named(x1, "b", ignore.order = FALSE) Condition Error: ! Expected `x1` to have names "b". Differences: `actual`: "a" `expected`: "b" # expect_named validates its inputs Code expect_named(c(a = 1), "a", ignore.order = "yes") Condition Error in `expect_named()`: ! `ignore.order` must be `TRUE` or `FALSE`, not the string "yes". Code expect_named(c(a = 1), "a", ignore.case = "yes") Condition Error in `expect_named()`: ! `ignore.case` must be `TRUE` or `FALSE`, not the string "yes". testthat/tests/testthat/_snaps/source.md0000644000176200001440000000235315127731051020226 0ustar liggesusers# source_file wraps error Code source_file(test_path("reporters/error-setup.R"), wrap = FALSE) Condition Error: ! Failed to evaluate 'reporters/error-setup.R'. 
Caused by error in `h()`: ! ! # checks its inputs Code source_file(1) Condition Error: ! `path` must be a single string, not the number 1. Code source_file("x") Condition Error: ! `path` does not exist. Code source_file(".", "x") Condition Error: ! `env` must be an environment, not the string "x". # works on code like the describe() example Code filter_desc(code, c("math library", "division()", "can handle division by 0")) Condition Error: ! Failed to find test with description "can handle division by 0". # preserve srcrefs Code filter_desc(code, "foo") Output expression(test_that("foo", { # this is a comment })) # errors if zero or duplicate labels Code filter_desc(code, "baz") Condition Error: ! Found multiple tests with description "baz". Code filter_desc(code, "missing") Condition Error: ! Failed to find test with description "missing". testthat/tests/testthat/_snaps/R4.5/0000755000176200001440000000000015047715224017036 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.5/snapshot-file/0000755000176200001440000000000015047715224021612 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.5/snapshot-file/version.txt0000644000176200001440000000000515047715224024033 0ustar liggesusersR4.5 testthat/tests/testthat/_snaps/R4.5/snapshot.md0000644000176200001440000000013215127731052021207 0ustar liggesusers# variants save different values Code r_version() Output [1] "R4.5" testthat/tests/testthat/_snaps/test-path.md0000644000176200001440000000024115127731051020631 0ustar liggesusers# throws error if can't find tests/testthat Code test_path("empty") Condition Error in `test_path()`: ! Can't find 'tests/testthat'. 
testthat/tests/testthat/_snaps/snapshot-value.md0000644000176200001440000000510115127731051021671 0ustar liggesusers# can snapshot values [ "a", 1.5, 1, true ] --- { "type": "list", "attributes": {}, "value": [ { "type": "character", "attributes": {}, "value": ["a"] }, { "type": "double", "attributes": {}, "value": [1.5] }, { "type": "integer", "attributes": {}, "value": [1] }, { "type": "logical", "attributes": {}, "value": [true] } ] } --- list("a", 1.5, 1L, TRUE) --- WAoAAAACAAQCAwACAwAAAAATAAAABAAAABAAAAABAAQACQAAAAFhAAAADgAAAAE/+AAAAAAA AAAAAA0AAAABAAAAAQAAAAoAAAABAAAAAQ== # can control snapshot value details 1.2 --- WAoAAAACAAQCAwACAwAAAAMGAAAEAgAAAAEABAAJAAAABWNsYXNzAAAAEAAAAAEABAAJAAAA B2Zvcm11bGEAAAQCAAAAAQAEAAkAAAAMLkVudmlyb25tZW50AAAABAAAAAAAAAAEAAAAAAAA AAQAAAAAAAAA+QAAAAAAAAACAAQACQAAAAh0ZXN0dGhhdAAEAAkAAAAFMy4yLjAAAAD+AAAA EwAAAB0AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4A AAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA /gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAAEwAAAB0AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4A AAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA /gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAAEwAAAB0A AAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA /gAAAP4AAAQCAAAAAQAEAAkAAAABZgAAAwYAAAQCAAAB/wAAABAAAAABAAQACQAAAAdmb3Jt dWxhAAAEAgAAAv8AAAP/AAAA/gAAAAEABAAJAAAAAX4AAAACAAAADgAAAAE/8AAAAAAAAAAA AP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+AAAA/gAAAP4AAAD+ AAAA/gAAAP4AAAD+AAAI/wAAAAIAAAAOAAAAAT/wAAAAAAAAAAAA/g== # tolerance passed to check_roundtrip 0.9 # check_roundtrip() gives nice error Code wrapper(NULL, list(), label = "foo", style = "json") Condition Error in `wrapper()`: ! `foo` could not be safely serialized with `style` = "json". Serializing then deserializing the object returned something new: `original` is NULL `new` is a list i You may need to try a different `style`. 
# expect_snapshot_value validates its inputs Code expect_snapshot_value(123, cran = "yes") Condition Error in `expect_snapshot_value()`: ! `cran` must be `TRUE` or `FALSE`, not the string "yes". Code expect_snapshot_value(123, tolerance = "high") Condition Error in `expect_snapshot_value()`: ! `tolerance` must be a number, not the string "high". testthat/tests/testthat/_snaps/expect-equality.md0000644000176200001440000000547715127731047022070 0ustar liggesusers# provide useful feedback on failure (3e) Code expect_identical(x, "a") Condition Error: ! Expected `x` to be identical to "a". Differences: `actual` is a double vector (1) `expected` is a character vector ('a') --- Code expect_equal(x, "a") Condition Error: ! Expected `x` to equal "a". Differences: `actual` is a double vector (1) `expected` is a character vector ('a') --- Code expect_identical(x, "a") Condition Error: ! Expected `x` to be identical to "a". Differences: Types not compatible: double is not character --- Code expect_equal(x, "a") Condition Error: ! Expected `x` to equal "a". Differences: Types not compatible: double is not character # correctly spaces lines Code expect_equal(list(a = 1), list(a = "b", b = 10)) Condition Error: ! Expected `list(a = 1)` to equal `list(a = "b", b = 10)`. Differences: `actual` is length 1 `expected` is length 2 `names(actual)`: "a" `names(expected)`: "a" "b" `actual$a` is a double vector (1) `expected$a` is a character vector ('b') `actual$b` is absent `expected$b` is a double vector (10) # provide useful feedback on failure (2e) Code expect_identical(x, "a") Condition Error: ! Expected `x` to be identical to "a". Differences: Types not compatible: double is not character --- Code expect_equal(x, "a") Condition Error: ! Expected `x` to equal "a". Differences: Types not compatible: double is not character # default labels use unquoting Code expect_equal(x, !!y) Condition Error: ! Expected `x` to equal 2. 
Differences: `actual`: 1.0 `expected`: 2.0 # useful message if objects equal but not identical Code expect_identical(f, g) Condition Error: ! Expected `f` to be identical to `g`. Differences: Objects equal but not identical # attributes for object (#452) Code expect_equal(oops, 0) Condition Error: ! Expected `oops` to equal 0. Differences: Attributes: < Modes: list, NULL > Attributes: < Lengths: 1, 0 > Attributes: < names for target but not for current > Attributes: < current is not list-like > # expect_equal validates its inputs Code expect_equal(1, 2, tolerance = "high") Condition Error in `expect_equal()`: ! `tolerance` must be a number or `NULL`, not the string "high". Code expect_equal(1, 2, tolerance = -1) Condition Error in `expect_equal()`: ! `tolerance` must be a number larger than or equal to 0 or `NULL`, not the number -1. testthat/tests/testthat/_snaps/mock-oo.md0000644000176200001440000000355115127731050020272 0ustar liggesusers# validates its inputs Code local_mocked_s3_method(1) Condition Error in `local_mocked_s3_method()`: ! `generic` must be a single string, not the number 1. Code local_mocked_s3_method("mean", 1) Condition Error in `local_mocked_s3_method()`: ! `signature` must be a single string, not the number 1. Code local_mocked_s3_method("mean", "bar", 1) Condition Error in `local_mocked_s3_method()`: ! `definition` must be a function or `NULL`, not the number 1. Code local_mocked_s3_method("notAGeneric", "bar", function() { }) Condition Error in `get()`: ! object 'notAGeneric' not found --- Code local_mocked_s4_method(1) Condition Error in `local_mocked_s4_method()`: ! `generic` must be a single string, not the number 1. Code local_mocked_s4_method("mean", 1) Condition Error in `local_mocked_s4_method()`: ! `signature` must be a character vector, not the number 1. Code local_mocked_s4_method("mean", "bar", 1) Condition Error in `local_mocked_s4_method()`: ! `definition` must be a function or `NULL`, not the number 1. 
Code local_mocked_s4_method("notAGeneric", "bar", function() { }) Condition Error in `local_mocked_s4_method()`: ! Can't find generic `notAGeneric()`. --- Code local_mocked_r6_class(mean) Condition Error in `local_mocked_r6_class()`: ! `class` must be an R6 class definition, not a function. Code local_mocked_r6_class(TestMockClass, public = 1) Condition Error in `local_mocked_r6_class()`: ! `public` must be a list, not the number 1. Code local_mocked_r6_class(TestMockClass, private = 1) Condition Error in `local_mocked_r6_class()`: ! `private` must be a list, not the number 1. testthat/tests/testthat/_snaps/expect-all.md0000644000176200001440000000262315127731046020770 0ustar liggesusers# validates its inputs Code expect_all_equal(mean, 1) Condition Error in `expect_all_equal()`: ! `object` must be a vector, not a function. Code expect_all_equal(logical(), 1) Condition Error in `expect_all_equal()`: ! `object` must not be empty. Code expect_all_equal(1:10, mean) Condition Error in `expect_all_equal()`: ! `expected` must be a vector, not a function. Code expect_all_equal(1:10, 1:2) Condition Error in `expect_all_equal()`: ! `expected` must be length 1. # can compare atomic vectors Code expect_all_equal(x, TRUE) Condition Error: ! Expected every element of `x` to equal TRUE. Differences: `actual[2:8]`: TRUE TRUE TRUE FALSE TRUE TRUE TRUE `expected[2:8]`: TRUE TRUE TRUE TRUE TRUE TRUE TRUE # can compare named lists Code expect_all_equal(x, list(1)) Condition Error: ! Expected every element of `x` to equal `list(1)`. Differences: `actual$c`: 2.0 `expected$c`: 1.0 # truncates very long differences Code expect_all_equal(x, FALSE) Condition Error: ! Expected every element of `x` to equal FALSE. 
Differences: `actual`: TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE `expected`: FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE testthat/tests/testthat/_snaps/expect-reference.md0000644000176200001440000000022115127731047022147 0ustar liggesusers# succeeds only when same object Code expect_reference(x, 1) Condition Error: ! Expected `x` to be a reference to 1. testthat/tests/testthat/_snaps/R4.6/0000755000176200001440000000000015054053615017034 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.6/snapshot-file/0000755000176200001440000000000015054053615021610 5ustar liggesuserstestthat/tests/testthat/_snaps/R4.6/snapshot-file/version.txt0000644000176200001440000000000515054053615024031 0ustar liggesusersR4.6 testthat/tests/testthat/_snaps/R4.6/snapshot.md0000644000176200001440000000013215054053615021211 0ustar liggesusers# variants save different values Code r_version() Output [1] "R4.6" testthat/tests/testthat/_snaps/expect-condition.md0000644000176200001440000001204415127731047022205 0ustar liggesusers# regexp = NULL checks for presence of error Code expect_error(f()) Condition Error: ! Expected `f()` to throw a error. # regexp = NA checks for absence of error Code expect_error(f(), NA) Condition Error: ! Expected `f()` not to throw any errors. Actually got a with message: Yes # regexp = string matches for error message Code expect_error(f(), "No") Condition Error: ! Expected `f()` to throw a error. # expect_error validates its inputs Code expect_error(stop("!"), regexp = 1) Condition Error in `expect_error()`: ! `regexp` must be a single string, `NA`, or `NULL`, not the number 1. Code expect_error(stop("!"), class = 1) Condition Error in `expect_error()`: ! `class` must be a single string or `NULL`, not the number 1. Code expect_error(stop("!"), inherit = "yes") Condition Error in `expect_error()`: ! `inherit` must be `TRUE` or `FALSE`, not the string "yes". 
# message method is called when expecting error Code expect_error(fb(), NA) Condition Error: ! Expected `fb()` not to throw any errors. Actually got a with message: dispatched! # expect_warning validates its inputs Code expect_warning(warning("!"), regexp = 1) Condition Error in `expect_warning()`: ! `regexp` must be a single string, `NA`, or `NULL`, not the number 1. Code expect_warning(warning("!"), class = 1) Condition Error in `expect_warning()`: ! `class` must be a single string or `NULL`, not the number 1. Code expect_warning(warning("!"), inherit = "yes") Condition Error in `expect_warning()`: ! `inherit` must be `TRUE` or `FALSE`, not the string "yes". Code expect_warning(warning("!"), all = "yes") Condition Error in `expect_warning()`: ! `all` must be `TRUE` or `FALSE`, not the string "yes". # regexp = NA checks for absence of message Code expect_message(f(), NA) Condition Error: ! Expected `f()` not to throw any messages. Actually got a with message: ! # expect_message validates its inputs Code expect_message(message("!"), regexp = 1) Condition Error in `expect_message()`: ! `regexp` must be a single string, `NA`, or `NULL`, not the number 1. Code expect_message(message("!"), class = 1) Condition Error in `expect_message()`: ! `class` must be a single string or `NULL`, not the number 1. Code expect_message(message("!"), inherit = "yes") Condition Error in `expect_message()`: ! `inherit` must be `TRUE` or `FALSE`, not the string "yes". Code expect_message(message("!"), all = "yes") Condition Error in `expect_message()`: ! `all` must be `TRUE` or `FALSE`, not the string "yes". # condition class is included in failure Code expect_condition(f1(), class = "bar") Condition Error: ! Expected `f1()` to throw a condition with class . # expect_condition validates its inputs Code expect_condition(stop("!"), regexp = 1) Condition Error in `expect_condition()`: ! `regexp` must be a single string, `NA`, or `NULL`, not the number 1. 
Code expect_condition(stop("!"), class = 1) Condition Error in `expect_condition()`: ! `class` must be a single string or `NULL`, not the number 1. Code expect_condition(stop("!"), inherit = "yes") Condition Error in `expect_condition()`: ! `inherit` must be `TRUE` or `FALSE`, not the string "yes". # unused arguments generate an error Code expect_condition(stop("Hi!"), foo = "bar") Condition Error in `expect_condition()`: ! Can't supply `...` unless `regexp` is set. * Unused arguments: `foo`. i Did you mean to use `regexp` so `...` is passed to `grepl()`? Code expect_condition(stop("Hi!"), , , "bar") Condition Error in `expect_condition()`: ! Can't supply `...` unless `regexp` is set. * Unused arguments: `..1`. i Did you mean to use `regexp` so `...` is passed to `grepl()`? Code expect_condition(stop("Hi!"), , , "bar", fixed = TRUE) Condition Error in `expect_condition()`: ! Can't supply `...` unless `regexp` is set. * Unused arguments: `..1` and `fixed`. i Did you mean to use `regexp` so `...` is passed to `grepl()`? Code expect_condition(stop("Hi!"), "x", foo = "bar") Condition Error in `expect_condition()`: ! Failed to compare condition to `regexp`. Caused by error in `grepl()`: ! unused argument (foo = "bar") Code expect_condition(stop("Hi!"), pattern = "bar", fixed = TRUE) Condition Error in `expect_condition()`: ! Can't supply `...` unless `regexp` is set. * Unused arguments: `pattern` and `fixed`. i Did you mean to use `regexp` so `...` is passed to `grepl()`? 
testthat/tests/testthat/_snaps/reporter-check.md0000644000176200001440000001030615127731050021637 0ustar liggesusers# basic report works [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] == Skipped tests (3) =========================================================== * empty test (2): 'reporters/tests.R:41:1', 'reporters/tests.R:45:1' * skip (1): 'reporters/tests.R:38:3' == Warnings ==================================================================== -- Warning ('reporters/tests.R:47:5'): warnings get backtraces ----------------- def Backtrace: x 1. \-f() == Failed tests ================================================================ -- Failure ('reporters/tests.R:13:3'): Failure:1 ------------------------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -- Failure ('reporters/tests.R:17:8'): Failure:2a ------------------------------ Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) -- Error ('reporters/tests.R:24:3'): Error:1 ----------------------------------- Error in `eval(code, test_env)`: stop -- Error ('reporters/tests.R:30:8'): errors get tracebacks --------------------- Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. 
\-h() [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] # doesn't truncate long lines [ FAIL 1 | WARN 0 | SKIP 0 | PASS 0 ] == Failed tests ================================================================ -- Failure ('reporters/long-test.R:2:3'): That very long test messages are not truncated because they contain useful information that you probably want to read -- Failure has been forced [ FAIL 1 | WARN 0 | SKIP 0 | PASS 0 ] # always shows summary [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] # shows warnings when not on CRAN [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] == Skipped tests (3) =========================================================== * empty test (2): 'reporters/tests.R:41:1', 'reporters/tests.R:45:1' * skip (1): 'reporters/tests.R:38:3' == Warnings ==================================================================== -- Warning ('reporters/tests.R:47:5'): warnings get backtraces ----------------- def Backtrace: x 1. \-f() == Failed tests ================================================================ -- Failure ('reporters/tests.R:13:3'): Failure:1 ------------------------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -- Failure ('reporters/tests.R:17:8'): Failure:2a ------------------------------ Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) -- Error ('reporters/tests.R:24:3'): Error:1 ----------------------------------- Error in `eval(code, test_env)`: stop -- Error ('reporters/tests.R:30:8'): errors get tracebacks --------------------- Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] # generates informative snapshot hints Code base::writeLines(snapshot_check_hint()) Output To review and process snapshots locally: * Locate check directory. * Copy 'tests/testthat/_snaps' to local package. * Run `testthat::snapshot_accept()` to accept all changes. * Run `testthat::snapshot_review()` to review all changes. 
--- Code base::writeLines(snapshot_check_hint()) Output To review and process snapshots locally: * Download and unzip artifact. * Copy 'tests/testthat/_snaps' to local package. * Run `testthat::snapshot_accept()` to accept all changes. * Run `testthat::snapshot_review()` to review all changes. --- Code base::writeLines(snapshot_check_hint()) Output To review and process snapshots locally: * Run `testthat::snapshot_download_gh("r-lib/testthat", "123")` to download snapshots. * Run `testthat::snapshot_accept()` to accept all changes. * Run `testthat::snapshot_review()` to review all changes. testthat/tests/testthat/_snaps/reporter-zzz.md0000644000176200001440000000077715127731051021433 0ustar liggesusers# useful error message if can't find reporter Code find_reporter(1) Condition Error in `find_reporter()`: ! `reporter` must be a string, a character vector, a reporter object, or a reporter class, not the number 1. Code find_reporter("blah") Condition Error in `find_reporter()`: ! Cannot find test reporter `blah`. Code find_reporter(c("summary", "blah")) Condition Error in `find_reporter()`: ! Cannot find test reporter `blah`. testthat/tests/testthat/_snaps/expect-constant.md0000644000176200001440000000126615127731047022054 0ustar liggesusers# logical tests act as expected Code expect_true(df) Condition Error: ! Expected `df` to be TRUE. Differences: `actual` is an S3 object of class , a list `expected` is a logical vector (TRUE) --- Code expect_false(df) Condition Error: ! Expected `df` to be FALSE. Differences: `actual` is an S3 object of class , a list `expected` is a logical vector (FALSE) # expect_null works Code expect_null(df) Condition Error: ! Expected `df` to be NULL. 
Differences: `actual` is an S3 object of class , a list `expected` is NULL testthat/tests/testthat/_snaps/expect-setequal.md0000644000176200001440000001307215127731050022036 0ustar liggesusers# warns if both inputs are named Code expect_setequal(c(a = 1), c(b = 1)) Condition Warning: expect_setequal() ignores names # checks inputs Code expect_setequal(sum, 1) Condition Error in `expect_setequal()`: ! `object` must be a vector, not a primitive function. Code expect_setequal(1, sum) Condition Error in `expect_setequal()`: ! `expected` must be a vector, not a primitive function. Code expect_setequal(!!fun, 1) Condition Error in `expect_setequal()`: ! `object` must be a vector, not a primitive function. Code expect_setequal(1, !!fun) Condition Error in `expect_setequal()`: ! `expected` must be a vector, not a primitive function. # useful message on failure Code expect_setequal("actual", "expected") Condition Error: ! Expected "actual" to have the same values as "expected". Actual: "actual" Expected: "expected" Needs: "actual" Absent: "expected" --- Code expect_setequal(x, y) Condition Error: ! Expected `x` to have the same values as `y`. Actual: 1, 2 Expected: 2 Needs: 1 --- Code expect_setequal(x, y) Condition Error: ! Expected `x` to have the same values as `y`. Actual: 2 Expected: 2, 3 Absent: 3 --- Code expect_setequal(x, y) Condition Error: ! Expected `x` to have the same values as `y`. Actual: 1, 2 Expected: 2, 3 Needs: 1 Absent: 3 --- Code expect_setequal(x, y) Condition Error: ! Expected `x` to have the same values as `y`. Actual: "a", "a" Expected: "b", "b", "b" Needs: "a" Absent: "b" --- Code expect_setequal(x, c("a", "b", "c", "d")) Condition Error: ! Expected `x` to have the same values as `c("a", "b", "c", "d")`. Actual: "a", "b", "c" Expected: "a", "b", "c", "d" Absent: "d" # truncates long vectors Code expect_setequal(x, y) Condition Error: ! Expected `x` to have the same values as `y`. Actual: 1, 2 Expected: 1, 2, 3, 4, 5, 6, 7, 8, 9, ... 
Absent: 3, 4, 5, 6, 7, 8, 9, 10, 11, ... # fails if names don't match Code expect_mapequal(x, y) Condition Error: ! Expected `x` to have the same names as `y`. Actual: "a", "b" Expected: "a" Needs: "b" --- Code expect_mapequal(y, x) Condition Error: ! Expected `y` to have the same names as `x`. Actual: "a" Expected: "a", "b" Absent: "b" # fails if values don't match Code expect_mapequal(x, y) Condition Error: ! Expected `x` to contain the same values as `y`. Differences: `actual$b`: 2.0 `expected$b`: 3.0 # warns if empty vector Code expect_success(expect_mapequal(list(), list())) Condition Warning: `object` and `expected` are empty vectors. # validates its inputs Code expect_mapequal(sum, named) Condition Error in `expect_mapequal()`: ! `object` must be a vector, not a primitive function. Code expect_mapequal(named, sum) Condition Error in `expect_mapequal()`: ! `expected` must be a vector, not a primitive function. Code expect_mapequal(unnamed, named) Condition Error in `expect_mapequal()`: ! All elements in `object` must have names. x Empty names at position: 1 Code expect_mapequal(named, unnamed) Condition Error in `expect_mapequal()`: ! All elements in `expected` must have names. x Empty names at position: 1 Code expect_mapequal(named, duplicated) Condition Error in `expect_mapequal()`: ! All elements in `expected` must have unique names. x Duplicate names: "x" Code expect_mapequal(duplicated, named) Condition Error in `expect_mapequal()`: ! All elements in `object` must have unique names. x Duplicate names: "x" # expect_contains() gives useful message on failure Code expect_contains(x1, x2) Condition Error: ! Expected `x1` to contain all values in `x2`. Actual: "a", "b", "c" Expected: "c", "d" Missing: "d" --- Code expect_contains(x1, x3) Condition Error: ! Expected `x1` to contain all values in `x3`. Actual: "a", "b", "c" Expected: "d", "e" Missing: "d", "e" # expect_in() gives useful message on failure Code expect_in(x1, x2) Condition Error: ! 
Expected `x1` to only contain values from `x2`. Actual: "a", "b" Expected: "b", "c" Invalid: "a" --- Code expect_in(x1, x3) Condition Error: ! Expected `x1` to only contain values from `x3`. Actual: "a", "b" Expected: "d", "e" Invalid: "a", "b" # expect_disjoint() gives useful message on failure Code expect_disjoint(x1, x2) Condition Error: ! Expected `x1` to be disjoint from `x2`. Actual: "a", "b", "c" Expected: None of "c", "d" Invalid: "c" --- Code expect_disjoint(x1, x3) Condition Error: ! Expected `x1` to be disjoint from `x3`. Actual: "a", "b", "c" Expected: None of "b", "c", "d" Invalid: "b", "c" --- Code expect_disjoint(NA, c("a", NA)) Condition Error: ! Expected NA to be disjoint from `c("a", NA)`. Actual: NA Expected: None of "a", NA Invalid: NA testthat/tests/testthat/_snaps/reporter-tap.md0000644000176200001440000000154715127731051021356 0ustar liggesusers# reporter works 1..9 # Context Successes ok 1 Success # Context Failures not ok 2 Failure:1 Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE not ok 3 Failure:2a Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) # Context Errors not ok 4 Error:1 Error in `eval(code, test_env)`: stop not ok 5 errors get tracebacks Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() # Context Skips ok 6 # SKIP Reason: skip ok 7 # SKIP Reason: empty test # Context Warnings ok 8 # WARNING def Backtrace: x 1. \-f() ok 9 # SKIP Reason: empty test testthat/tests/testthat/_snaps/expect-inheritance.md0000644000176200001440000000754215127731047022517 0ustar liggesusers# expect_type checks typeof Code expect_type(x, "double") Condition Error: ! Expected `x` to have type "double". Actual type: "integer" # expect_type validates its inputs Code expect_type(1, c("integer", "double")) Condition Error in `expect_type()`: ! `type` must be a single string, not a character vector. 
# expect_is checks class Code expect_is(factor("a"), "integer") Condition Error: ! Expected `factor("a")` to inherit from "character". Actual inheritance: "factor" # expect_s3/s4_class fails if appropriate type Code expect_s3_class(x1, "double") Condition Error: ! Expected `x1` to be an S3 object. Actual OO type: none. --- Code expect_s3_class(x2, "double") Condition Error: ! Expected `x2` to be an S3 object. Actual OO type: S4. --- Code expect_s4_class(x3, "double") Condition Error: ! Expected `x3` to be an S4 object. Actual OO type: S3. # expect_s[34]_class can check not S3/S4 Code expect_s3_class(factor(), NA) Condition Error: ! Expected `factor()` not to be an S3 object. --- Code expect_s4_class(A(), NA) Condition Error: ! Expected `A()` not to be an S4 object. # test_s4_class respects class hierarchy Code expect_s4_class(C(), "D") Condition Error: ! Expected `C()` to inherit from "D". Actual class: "C"/"A"/"B"/"list"/"vector". # expect_s3_class validates its inputs Code expect_s3_class(factor("a"), 1) Condition Error in `expect_s3_class()`: ! `class` must be a character vector or NA, not the number 1. Code expect_s3_class(factor("a"), "factor", exact = "yes") Condition Error in `expect_s3_class()`: ! `exact` must be `TRUE` or `FALSE`, not the string "yes". # test_s3_class respects class hierarchy Code expect_s3_class(x, "c") Condition Error: ! Expected `x` to inherit from "c". Actual class: "a"/"b". --- Code expect_s3_class(x, c("c", "d")) Condition Error: ! Expected `x` to inherit from "c"/"d". Actual class: "a"/"b". # test_s3_class can request exact match Code expect_s3_class(x, "a", exact = TRUE) Condition Error: ! Expected `x` to have class "a". Actual class: "a"/"b". # expect_s4_class validates its inputs Code expect_s4_class(factor("a"), 1) Condition Error in `expect_s4_class()`: ! `class` must be a character vector or NA, not the number 1. # expect_r6_class generates useful failures Code expect_r6_class(x, "Student") Condition Error: ! 
Expected `x` to be an R6 object. Actual OO type: none. Code expect_r6_class(person, "Student") Condition Error: ! Expected `person` to inherit from "Student". Actual class: "Person"/"R6". # expect_r6_class validates its inputs Code expect_r6_class(1, c("Person", "Student")) Condition Error in `expect_r6_class()`: ! `class` must be a single string, not a character vector. # can check with actual class Code expect_s7_class(Foo(), class = Bar) Condition Error: ! Expected `Foo()` to inherit from . Actual class: . --- Code expect_s7_class(Baz(), class = Bar) Condition Error: ! Expected `Baz()` to inherit from . Actual class: /. # informative failure if not S7 Code expect_s7_class(x, Foo) Condition Error: ! Expected `x` to be an S7 object. Actual OO type: S3. # expect_s7_class validates its inputs Code expect_s7_class(1, 1) Condition Error in `expect_s7_class()`: ! `class` must be an S7 class object, not the number 1. testthat/tests/testthat/_snaps/reporter-summary.md0000644000176200001440000001062415127731051022263 0ustar liggesusers# can control appearance of dots reporters/tests: Successes: . Failures: 12 Errors: 34 Skips: SS Warnings: WS == Skipped ===================================================================== 1. explicit skips are reported ('reporters/tests.R:38:3') - Reason: skip 2. empty tests are implicitly skipped ('reporters/tests.R:41:1') - Reason: empty test 3. warnings get backtraces ('reporters/tests.R:45:1') - Reason: empty test == Warnings ==================================================================== 1. warnings get backtraces ('reporters/tests.R:47:5') - def == Failed ====================================================================== -- 1. Failure ('reporters/tests.R:13:3'): Failure:1 ---------------------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -- 2. Failure ('reporters/tests.R:17:8'): Failure:2a --------------------------- Expected FALSE to be TRUE. 
Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) -- 3. Error ('reporters/tests.R:24:3'): Error:1 -------------------------------- Error in `eval(code, test_env)`: stop -- 4. Error ('reporters/tests.R:30:8'): errors get tracebacks ------------------ Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() == DONE ======================================================================== --- reporters/tests: Successes: Failures: 12 Errors: 34 Skips: SS Warnings: WS == Skipped ===================================================================== 1. explicit skips are reported ('reporters/tests.R:38:3') - Reason: skip 2. empty tests are implicitly skipped ('reporters/tests.R:41:1') - Reason: empty test 3. warnings get backtraces ('reporters/tests.R:45:1') - Reason: empty test == Warnings ==================================================================== 1. warnings get backtraces ('reporters/tests.R:47:5') - def == Failed ====================================================================== -- 1. Failure ('reporters/tests.R:13:3'): Failure:1 ---------------------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -- 2. Failure ('reporters/tests.R:17:8'): Failure:2a --------------------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) -- 3. Error ('reporters/tests.R:24:3'): Error:1 -------------------------------- Error in `eval(code, test_env)`: stop -- 4. Error ('reporters/tests.R:30:8'): errors get tracebacks ------------------ Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() == DONE ======================================================================== # can control maximum reports reporters/tests: Successes: . Failures: 12 Errors: 34 Skips: SS Warnings: WS == Skipped ===================================================================== 1. 
explicit skips are reported ('reporters/tests.R:38:3') - Reason: skip 2. empty tests are implicitly skipped ('reporters/tests.R:41:1') - Reason: empty test 3. warnings get backtraces ('reporters/tests.R:45:1') - Reason: empty test == Warnings ==================================================================== 1. warnings get backtraces ('reporters/tests.R:47:5') - def == Failed ====================================================================== -- 1. Failure ('reporters/tests.R:13:3'): Failure:1 ---------------------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -- 2. Failure ('reporters/tests.R:17:8'): Failure:2a --------------------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) ... and 2 more Maximum number of 2 failures reached, some test results may be missing. == DONE ======================================================================== testthat/tests/testthat/_snaps/expect-self-test.md0000644000176200001440000000462615127731050022126 0ustar liggesusers# expect_failure() generates a useful error messages Code expect_failure(expect_no_failure()) Condition Error: ! Expected 0 successes and 1 failure. v Observed 0 successes. x Observed 0 failures. Code expect_failure(expect_many_failures()) Condition Error: ! Expected 0 successes and 1 failure. v Observed 0 successes. x Observed 2 failures. Code expect_failure(expect_has_success()) Condition Error: ! Expected 0 successes and 1 failure. x Observed 1 success. v Observed 1 failure. Code expect_failure(expect_both_wrong()) Condition Error: ! Expected 0 successes and 1 failure. x Observed 1 success. x Observed 0 failures. Code expect_failure(expect_failure_foo(), "bar") Condition Error: ! Expected failure message to match regexp "bar". Actual message: x | foo # errors in expect_success bubble up Code expect_success(abort("error")) Condition Error: ! 
error # show_failure Code show_failure(expect_true(FALSE)) Output Failed expectation: Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE # expect_success() generates a useful error messages Code expect_success(expect_no_success()) Condition Error: ! Expected 1 success and 0 failures. x Observed 0 successes. v Observed 0 failures. Code expect_success(expect_many_successes()) Condition Error: ! Expected 1 success and 0 failures. x Observed 2 successes. v Observed 0 failures. Code expect_success(expect_has_failure()) Condition Error: ! Expected 1 success and 0 failures. v Observed 1 success. x Observed 1 failure. Code expect_success(expect_both_wrong()) Condition Error: ! Expected 1 success and 0 failures. x Observed 0 successes. x Observed 1 failure. # expect_no are deprecated Code expect_no_failure(pass()) Condition Warning: `expect_no_failure()` was deprecated in testthat 3.3.0. i Please use `expect_success()` instead. Code expect_no_success(fail()) Condition Warning: `expect_no_success()` was deprecated in testthat 3.3.0. i Please use `expect_failure()` instead. testthat/tests/testthat/_snaps/quasi-label.md0000644000176200001440000000177515127731050021133 0ustar liggesusers# missing arguments are propagated Code expect_null(x$missing) Condition Error: ! Expected `x$missing` to be NULL. Differences: `actual` is absent `expected` is NULL # produces useful summaries for long calls Code expr_label(quote(foo(a = "this is a long argument", b = "this is a long argument", c = "this is a long argument"))) Output [1] "`foo(...)`" Code expr_label(quote(arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg)) Output [1] "`... + arg`" Code expr_label(quote(arg + (arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg))) Output [1] "`arg + ...`" Code expr_label(quote(function(a, b, c) { a + b + c })) Output [1] "`function(a, b, c) ...`" # informative error for missing arg Code expect_equal() Condition Error in `expect_equal()`: ! 
argument `object` is missing, with no default. testthat/tests/testthat/_snaps/expect-output.md0000644000176200001440000000152015127731047021554 0ustar liggesusers# expect = NA checks for no output Code expect_output(g(), NA) Condition Error: ! Expected `g()` to produce no output. Actual output: ! # expect = NULL checks for some output Code expect_output(f(), NULL) Condition Error: ! Expected `f()` to produce output. # expect = string checks for match Code expect_output(g(), "x") Condition Error: ! Expected output from `g()` to match regexp "x". Actual output: x | ! --- Code expect_output("a", "x") Condition Error: ! Expected "a" to produce output. # expect_output validates its inputs Code expect_output(cat("hello"), "hello", width = "wide") Condition Error in `expect_output()`: ! `width` must be a whole number, not the string "wide". testthat/tests/testthat/_snaps/snapshot.md0000644000176200001440000001257615127731052020576 0ustar liggesusers# can snapshot output y --- y --- y --- y # can snapshot everything Code f() Output [1] "1" Message 2 Condition Warning in `f()`: 3 Error in `f()`: ! 4 # empty lines are preserved Code f() Output 1 Message 2 Condition Warning in `f()`: 3 Error in `f()`: ! 4 # line-endings fixed before comparison Code cat(x) Output a b # multiple outputs of same type are collapsed Code x <- 1 y <- 1 { message("a") message("b") } Message a b Code { warning("a") warning("b") } Condition Warning: a Warning: b # can scrub output/messages/warnings/errors Code secret() Output [1] "" Message Condition Warning in `()`: Error in `()`: ! --- Code print("secret") Output [1] "****" # always checks error status Code expect_snapshot(print("!"), error = TRUE) Condition Error: ! Expected `print("!")` to throw a error. # snapshots of failures fail Code expect_snapshot(fail()) Condition Error: ! 
Failure has been forced # can capture error/warning messages This is an error --- This is a warning # snapshot captures deprecations Code foo() Condition Warning: `foo()` was deprecated in testthat 1.0.0. --- `foo()` was deprecated in testthat 1.0.0. --- `foo()` was deprecated in testthat 1.0.0. # can check error/warning classes Code expect_snapshot_error(1) Condition Error: ! 1 did not generate error --- Code expect_snapshot_error(1, class = "myerror") Condition Error: ! 1 did not generate error with class 'myerror' --- Code expect_snapshot_warning(1) Condition Error: ! 1 did not generate warning --- Code expect_snapshot_warning(1, class = "mywarning") Condition Error: ! 1 did not generate warning with class 'mywarning' # snapshot handles multi-line input Code 1 + 2 Output [1] 3 Code 3 + 4 Output [1] 7 Code # this is a comment # snapshot captures output if visible Code f_visible() Output [1] "x" --- Code f_invisible() # captures custom classes Code f() Message Hello Condition Warning: Goodbye Error in `f()`: ! Eeek! # even with multiple lines a b c --- a b c # `expect_snapshot()` does not inject Code x <- quote(!!foo) expect_equal(x, call("!", call("!", quote(foo)))) # full condition message is printed with rlang Code foo <- error_cnd("foo", message = "Title parent.") abort("Title.", parent = foo) Condition Error: ! Title. Caused by error: ! Title parent. # can print with and without condition classes Code f() Message foo Condition Warning in `f()`: bar Condition Error in `f()`: ! baz --- Code f() Message foo Condition Warning in `f()`: bar Error in `f()`: ! baz # errors and warnings are folded Code f() Condition Warning in `f()`: foo Error in `f()`: ! bar # hint is informative Code snapshot_hint("bar.R", reset_output = FALSE) Output * Run `testthat::snapshot_accept("bar.R")` to accept the change. * Run `testthat::snapshot_review("bar.R")` to review the change. 
# expect_snapshot validates its inputs Code expect_snapshot(1 + 1, cran = "yes") Condition Error in `expect_snapshot()`: ! `cran` must be `TRUE` or `FALSE`, not the string "yes". Code expect_snapshot(1 + 1, error = "yes") Condition Error in `expect_snapshot()`: ! `error` must be `TRUE` or `FALSE`, not the string "yes". Code expect_snapshot(1 + 1, cnd_class = "yes") Condition Error in `expect_snapshot()`: ! `cnd_class` must be `TRUE` or `FALSE`, not the string "yes". # expect_snapshot_output validates its inputs Code expect_snapshot_output(cat("test"), cran = "yes") Condition Error in `expect_snapshot_output()`: ! `cran` must be `TRUE` or `FALSE`, not the string "yes". # expect_snapshot_error validates its inputs Code expect_snapshot_error(stop("!"), class = 123) Condition Error in `expect_snapshot_error()`: ! `class` must be a single string, not the number 123. Code expect_snapshot_error(stop("!"), cran = "yes") Condition Error in `expect_snapshot_error()`: ! `cran` must be `TRUE` or `FALSE`, not the string "yes". # expect_snapshot_warning validates its inputs Code expect_snapshot_warning(warning("!"), class = 123) Condition Error in `expect_snapshot_warning()`: ! `class` must be a single string, not the number 123. Code expect_snapshot_warning(warning("!"), cran = "yes") Condition Error in `expect_snapshot_warning()`: ! `cran` must be `TRUE` or `FALSE`, not the string "yes". testthat/tests/testthat/_snaps/describe.md0000644000176200001440000000135215127731046020510 0ustar liggesusers# snapshot tests in describe Code 1 + 1 Output [1] 2 # snapshot tests in describe / and in it Code 2 + 2 Output [1] 4 # has to have a valid description for the block Code describe() Condition Error in `describe()`: ! `description` must be a single string, not absent. Code describe(c("a", "b")) Condition Error in `describe()`: ! `description` must be a single string, not a character vector. Code it() Condition Error in `it()`: ! `description` must be a single string, not absent. 
Code it(c("a", "b")) Condition Error in `it()`: ! `description` must be a single string, not a character vector. testthat/tests/testthat/_snaps/snapshot-reporter.md0000644000176200001440000000022215127731051022416 0ustar liggesusers# `expect_error()` can fail inside `expect_snapshot()` Code err$message Output [1] "Error: Expected NULL to throw a error." testthat/tests/testthat/_snaps/snapshot-file.md0000644000176200001440000000304715127731051021503 0ustar liggesusers# expect_snapshot_file finds duplicate snapshot files Code expect_snapshot_file(write_tmp_lines(r_version()), "version.txt", variant = r_version()) Condition Error in `expect_snapshot_file()`: ! Snapshot file names must be unique. "version.txt" has already been used. # warns on first creation Code out <- snapshot_file_equal_(path) Condition Warning: Adding new file snapshot: 'tests/testthat/_snaps/my-test/test.txt' --- Code snapshot_file_equal_("doesnt-exist.txt") Condition Error in `snapshot_file_equal_()`: ! 'doesnt-exist.txt' not found. # generates informative hint Code snapshot_hint("lala", reset_output = FALSE) Output * Run `testthat::snapshot_accept("lala")` to accept the change. * Run `testthat::snapshot_review("lala")` to review the change. # expect_snapshot_file validates its inputs Code expect_snapshot_file(123) Condition Error in `expect_snapshot_file()`: ! `path` must be a single string, not the number 123. Code expect_snapshot_file("doesnt-exist.txt") Condition Error in `expect_snapshot_file()`: ! 'doesnt-exist.txt' doesn't exist. Code expect_snapshot_file(path, 123) Condition Error in `expect_snapshot_file()`: ! `name` must be a single string, not the number 123. Code expect_snapshot_file(path, "test.txt", cran = "yes") Condition Error in `expect_snapshot_file()`: ! `cran` must be `TRUE` or `FALSE`, not the string "yes". 
testthat/tests/testthat/_snaps/reporter-llm.md0000644000176200001440000000342715127731050021354 0ustar liggesusers# reports issues immediately but not successes FAILURE: 'reporters/tests.R:13:3' ----------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE FAILURE: 'reporters/tests.R:17:8' ----------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-testthat::expect_true(FALSE) ERROR: 'reporters/tests.R:24:3' ------------------- Error in `eval(code, test_env)`: stop ERROR: 'reporters/tests.R:30:8' ------------------- Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() SKIP: 'reporters/tests.R:38:3' -------------------- Reason: skip SKIP: 'reporters/tests.R:41:1' -------------------- Reason: empty test WARNING: 'reporters/tests.R:47:5' ----------------- def Backtrace: x 1. \-f() SKIP: 'reporters/tests.R:45:1' -------------------- Reason: empty test [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] # reports only summary for all successes [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] # fails after max_fail tests FAILURE: 'reporters/fail-many.R:3:5' -------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE FAILURE: 'reporters/fail-many.R:3:5' -------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE FAILURE: 'reporters/fail-many.R:3:5' -------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Maximum number of failures exceeded; quitting. i Increase this number with (e.g.) 
`testthat::set_max_fails(Inf)` testthat/tests/testthat/_snaps/snapshot-cleanup.md0000644000176200001440000000031015127731051022201 0ustar liggesusers# snapshot cleanup makes nice message if needed Code snapshot_cleanup(dir) Message Deleting unused snapshots: 'a.md' and 'b.md' Code snapshot_cleanup(dir, c("a", "b")) testthat/tests/testthat/_snaps/expectation.md0000644000176200001440000000044515127731050021250 0ustar liggesusers# validates key inputs Code expect(1) Condition Error in `expect()`: ! `ok` must be `TRUE` or `FALSE`, not the number 1. Code expect(TRUE, 1) Condition Error in `expect()`: ! `failure_message` must be a character vector, not the number 1. testthat/tests/testthat/_snaps/test-compiled-code.md0000644000176200001440000000143215127731052022405 0ustar liggesusers# get_routine() fails when no routine exists Code get_routine("utils", "no_such_routine") Condition Error in `get_routine()`: ! Failed to locate routine `no_such_routine` in package utils. # validates inputs Code expect_cpp_tests_pass(123) Condition Error in `expect_cpp_tests_pass()`: ! `package` must be a single string, not the number 123. Code run_cpp_tests(123) Condition Error in `run_cpp_tests()`: ! `package` must be a single string, not the number 123. # useful messaging Code use_catch(path) Message v Added C++ unit testing infrastructure. i Please ensure you have LinkingTo: testthat in your DESCRIPTION. i Please ensure you have Suggests: xml2 in your DESCRIPTION. testthat/tests/testthat/_snaps/reporter-stop.md0000644000176200001440000000214115127731051021546 0ustar liggesusers# produces useful output Code with_reporter("stop", run_tests()) Output Test passed with 1 success. -- Failure: Failure:1 ---------------------------------------------------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Condition Error: ! Test failed with 1 failure and 0 successes. 
# works nicely with nested tests Code with_reporter("stop", run_tests()) Output Test passed with 2 successes. -- Failure: failed then succeeded / failed-1 ----------------------------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -- Failure: failed then succeeded / failed-2 ----------------------------------- Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Condition Error: ! Test failed with 2 failures and 1 success. # errors when needed Code r$end_test() Condition Error: ! Test failed with 1 failure and 0 successes. testthat/tests/testthat/_snaps/deprec-condition.md0000644000176200001440000000027715127731046022163 0ustar liggesusers# is_informative_error is defunct Code is_informative_error(TRUE) Condition Error: ! `is_informative_error()` was deprecated in testthat 3.0.0 and is now defunct. testthat/tests/testthat/_snaps/mock.md0000644000176200001440000000063415127731050017656 0ustar liggesusers# now defunct Code local_mock() Condition Error: ! `local_mock()` was deprecated in testthat 3.2.0 and is now defunct. i Please use `local_mocked_bindings()` instead. Code with_mock(is_testing = function() FALSE) Condition Error: ! `with_mock()` was deprecated in testthat 3.2.0 and is now defunct. i Please use `with_mocked_bindings()` instead. testthat/tests/testthat/_snaps/reporter-progress.md0000644000176200001440000005375615127731051022447 0ustar liggesusers# captures error before first test v | F W S OK | Context / | 0 | reporters/error-setup - | 1 0 | reporters/error-setup x | 1 0 | reporters/error-setup -------------------------------------------------------------------------------- Error ('reporters/error-setup.R:3:6'): (code run outside of `test_that()`) Error in `h()`: ! Backtrace: x 1. +-testthat::setup(f()) 2. | \-rlang::eval_tidy(enquo(code), env = env) 3. \-f() 4. \-g() 5. 
\-h() -------------------------------------------------------------------------------- == Results ===================================================================== -- Failed tests ---------------------------------------------------------------- Error ('reporters/error-setup.R:3:6'): (code run outside of `test_that()`) Error in `h()`: ! Backtrace: x 1. +-testthat::setup(f()) 2. | \-rlang::eval_tidy(enquo(code), env = env) 3. \-f() 4. \-g() 5. \-h() [ FAIL 1 | WARN 0 | SKIP 0 | PASS 0 ] No one gets it right on their first try # gracefully handles multiple contexts v | F W S OK | Context / | 0 | reporters/context / | 0 | my context - | 1 | my context v | 1 | my context == Results ===================================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] Way to go! # fails after max_fail tests v | F W S OK | Context / | 0 | reporters/fail-many - | 1 0 | reporters/fail-many \ | 2 0 | reporters/fail-many | | 3 0 | reporters/fail-many / | 4 0 | reporters/fail-many - | 5 0 | reporters/fail-many \ | 6 0 | reporters/fail-many | | 7 0 | reporters/fail-many / | 8 0 | reporters/fail-many - | 9 0 | reporters/fail-many \ | 10 0 | reporters/fail-many | | 11 0 | reporters/fail-many x | 11 0 | reporters/fail-many -------------------------------------------------------------------------------- Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. 
Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -------------------------------------------------------------------------------- Maximum number of failures exceeded; quitting. i Increase this number with (e.g.) `testthat::set_max_fails(Inf)` # can fully suppress incremental updates v | F W S OK | Context / | 0 | reporters/successes - | 1 | reporters/successes \ | 2 | reporters/successes | | 3 | reporters/successes / | 4 | reporters/successes - | 5 | reporters/successes \ | 6 | reporters/successes | | 7 | reporters/successes v | 7 | reporters/successes == Results ===================================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] Way to go! --- v | F W S OK | Context v | 7 | reporters/successes == Results ===================================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] Way to go! 
# reports backtraces v | F W S OK | Context / | 0 | reporters/backtraces - | 1 0 | reporters/backtraces \ | 2 0 | reporters/backtraces | | 3 0 | reporters/backtraces / | 4 0 | reporters/backtraces - | 5 0 | reporters/backtraces \ | 6 0 | reporters/backtraces | | 6 1 0 | reporters/backtraces / | 6 1 1 | reporters/backtraces - | 7 1 1 | reporters/backtraces \ | 8 1 1 | reporters/backtraces | | 9 1 1 | reporters/backtraces / | 10 1 1 | reporters/backtraces x | 10 1 1 | reporters/backtraces -------------------------------------------------------------------------------- Error ('reporters/backtraces.R:3:8'): errors thrown at block level are entraced Error in `g()`: foo Backtrace: x 1. \-f() 2. \-g() Error ('reporters/backtraces.R:8:10'): errors thrown from a quasi-labelled argument are entraced Error in `foo()`: foo Backtrace: x 1. +-testthat::expect_s3_class(foo(), "foo") 2. | \-testthat::quasi_label(enquo(object)) 3. | \-rlang::eval_bare(expr, quo_get_env(quo)) 4. \-foo() Error ('reporters/backtraces.R:13:10'): errors thrown from a quasi-labelled argument are entraced (deep case) Error in `foo()`: foo Backtrace: x 1. +-testthat::expect_s3_class(f(), "foo") 2. | \-testthat::quasi_label(enquo(object)) 3. | \-rlang::eval_bare(expr, quo_get_env(quo)) 4. \-f() 5. \-g() 6. +-testthat::expect_s3_class(foo(), "foo") 7. | \-testthat::quasi_label(enquo(object)) 8. | \-rlang::eval_bare(expr, quo_get_env(quo)) 9. \-foo() Error ('reporters/backtraces.R:21:10'): errors thrown from a quasi-labelled argument are entraced (deep deep case) Error in `bar()`: foobar Backtrace: x 1. \-f() 2. \-g() 3. +-testthat::expect_s3_class(foo(), "foo") 4. | \-testthat::quasi_label(enquo(object)) 5. | \-rlang::eval_bare(expr, quo_get_env(quo)) 6. \-foo() 7. \-bar() Error ('reporters/backtraces.R:32:16'): failed expect_error() prints a backtrace Error in `signaller()`: bar Backtrace: x 1. +-testthat::expect_error(f(), "foo") 2. | \-testthat:::expect_condition_matching_(...) 3. 
| \-testthat:::quasi_capture(...) 4. | +-testthat (local) .capture(...) 5. | | \-base::withCallingHandlers(...) 6. | \-rlang::eval_bare(quo_get_expr(.quo), quo_get_env(.quo)) 7. \-f() 8. \-signaller() Error ('reporters/backtraces.R:41:3'): Errors are inspected with `conditionMessage()` Error in `eval(code, test_env)`: dispatched Backtrace: x 1. \-rlang::abort("Wrong message", "foobar") Warning ('reporters/backtraces.R:46:10'): also get backtraces for warnings foobar Backtrace: x 1. \-foo() 2. \-bar() Error ('reporters/backtraces.R:54:5'): deep stacks are shown Error in `f(x - 1)`: This is deep Backtrace: x 1. \-f(25) 2. \-f(x - 1) 3. \-f(x - 1) 4. \-f(x - 1) 5. \-f(x - 1) 6. \-f(x - 1) 7. \-f(x - 1) 8. \-f(x - 1) 9. \-f(x - 1) 10. \-f(x - 1) 11. \-f(x - 1) 12. \-f(x - 1) 13. \-f(x - 1) 14. \-f(x - 1) 15. \-f(x - 1) 16. \-f(x - 1) 17. \-f(x - 1) 18. \-f(x - 1) 19. \-f(x - 1) 20. \-f(x - 1) 21. \-f(x - 1) 22. \-f(x - 1) 23. \-f(x - 1) 24. \-f(x - 1) 25. \-f(x - 1) 26. \-f(x - 1) Error ('reporters/backtraces.R:64:3'): errors in snapshots get useful backtraces Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() Failure ('reporters/backtraces.R:70:6'): (code run outside of `test_that()`) Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-g() 3. \-h() 4. \-testthat::expect_true(FALSE) Failure ('reporters/backtraces.R:75:3'): nested expectations get backtraces Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-g() 3. \-h() 4. \-testthat::expect_true(FALSE) -------------------------------------------------------------------------------- == Results ===================================================================== -- Failed tests ---------------------------------------------------------------- Error ('reporters/backtraces.R:3:8'): errors thrown at block level are entraced Error in `g()`: foo Backtrace: x 1. \-f() 2. 
\-g() Error ('reporters/backtraces.R:8:10'): errors thrown from a quasi-labelled argument are entraced Error in `foo()`: foo Backtrace: x 1. +-testthat::expect_s3_class(foo(), "foo") 2. | \-testthat::quasi_label(enquo(object)) 3. | \-rlang::eval_bare(expr, quo_get_env(quo)) 4. \-foo() Error ('reporters/backtraces.R:13:10'): errors thrown from a quasi-labelled argument are entraced (deep case) Error in `foo()`: foo Backtrace: x 1. +-testthat::expect_s3_class(f(), "foo") 2. | \-testthat::quasi_label(enquo(object)) 3. | \-rlang::eval_bare(expr, quo_get_env(quo)) 4. \-f() 5. \-g() 6. +-testthat::expect_s3_class(foo(), "foo") 7. | \-testthat::quasi_label(enquo(object)) 8. | \-rlang::eval_bare(expr, quo_get_env(quo)) 9. \-foo() Error ('reporters/backtraces.R:21:10'): errors thrown from a quasi-labelled argument are entraced (deep deep case) Error in `bar()`: foobar Backtrace: x 1. \-f() 2. \-g() 3. +-testthat::expect_s3_class(foo(), "foo") 4. | \-testthat::quasi_label(enquo(object)) 5. | \-rlang::eval_bare(expr, quo_get_env(quo)) 6. \-foo() 7. \-bar() Error ('reporters/backtraces.R:32:16'): failed expect_error() prints a backtrace Error in `signaller()`: bar Backtrace: x 1. +-testthat::expect_error(f(), "foo") 2. | \-testthat:::expect_condition_matching_(...) 3. | \-testthat:::quasi_capture(...) 4. | +-testthat (local) .capture(...) 5. | | \-base::withCallingHandlers(...) 6. | \-rlang::eval_bare(quo_get_expr(.quo), quo_get_env(.quo)) 7. \-f() 8. \-signaller() Error ('reporters/backtraces.R:41:3'): Errors are inspected with `conditionMessage()` Error in `eval(code, test_env)`: dispatched Backtrace: x 1. \-rlang::abort("Wrong message", "foobar") Error ('reporters/backtraces.R:54:5'): deep stacks are shown Error in `f(x - 1)`: This is deep Backtrace: x 1. \-f(25) 2. \-f(x - 1) 3. \-f(x - 1) 4. \-f(x - 1) 5. \-f(x - 1) 6. \-f(x - 1) 7. \-f(x - 1) 8. \-f(x - 1) 9. \-f(x - 1) 10. \-f(x - 1) 11. \-f(x - 1) 12. \-f(x - 1) 13. \-f(x - 1) 14. \-f(x - 1) 15. \-f(x - 1) 16. 
\-f(x - 1) 17. \-f(x - 1) 18. \-f(x - 1) 19. \-f(x - 1) 20. \-f(x - 1) 21. \-f(x - 1) 22. \-f(x - 1) 23. \-f(x - 1) 24. \-f(x - 1) 25. \-f(x - 1) 26. \-f(x - 1) Error ('reporters/backtraces.R:64:3'): errors in snapshots get useful backtraces Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() Failure ('reporters/backtraces.R:70:6'): (code run outside of `test_that()`) Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-g() 3. \-h() 4. \-testthat::expect_true(FALSE) Failure ('reporters/backtraces.R:75:3'): nested expectations get backtraces Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. \-g() 3. \-h() 4. \-testthat::expect_true(FALSE) [ FAIL 10 | WARN 1 | SKIP 0 | PASS 1 ] No one gets it right on their first try # records skips v | F W S OK | Context / | 0 | reporters/skips - | 1 0 | reporters/skips \ | 2 0 | reporters/skips v | 2 0 | reporters/skips == Results ===================================================================== -- Skipped tests (2) ----------------------------------------------------------- * longer skip (1): 'reporters/skips.R:6:3' * regular skip (1): 'reporters/skips.R:2:3' [ FAIL 0 | WARN 0 | SKIP 2 | PASS 0 ] Way to go! # compact display is informative == Testing reporters/tests.R =================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 1 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 2 | WARN 0 | SKIP 0 | PASS 1 ] -- Failure ('reporters/tests.R:13:3'): Failure:1 ------------------------------- Expected `x` to be TRUE. Differences: `actual`: FALSE `expected`: TRUE -- Failure ('reporters/tests.R:17:8'): Failure:2a ------------------------------ Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Backtrace: x 1. \-f() 2. 
\-testthat::expect_true(FALSE) [ FAIL 2 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 3 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 4 | WARN 0 | SKIP 0 | PASS 1 ] -- Error ('reporters/tests.R:24:3'): Error:1 ----------------------------------- Error in `eval(code, test_env)`: stop -- Error ('reporters/tests.R:30:8'): errors get tracebacks --------------------- Error in `h()`: ! Backtrace: x 1. \-f() 2. \-g() 3. \-h() [ FAIL 4 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 4 | WARN 0 | SKIP 1 | PASS 1 ] [ FAIL 4 | WARN 0 | SKIP 2 | PASS 1 ] [ FAIL 4 | WARN 0 | SKIP 2 | PASS 1 ] [ FAIL 4 | WARN 1 | SKIP 2 | PASS 1 ] [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] -- Warning ('reporters/tests.R:47:5'): warnings get backtraces ----------------- def Backtrace: x 1. \-f() -- Skipped tests (3) ----------------------------------------------------------- * empty test (2): 'reporters/tests.R:41:1', 'reporters/tests.R:45:1' * skip (1): 'reporters/tests.R:38:3' [ FAIL 4 | WARN 1 | SKIP 3 | PASS 1 ] # display of successes only is compact == Testing reporters/successes.R =============================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 2 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 4 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 5 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 6 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] Done! 
--- == Testing reporters/skips.R =================================================== [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 1 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 2 | PASS 0 ] -- Skipped tests (2) ----------------------------------------------------------- * longer skip (1): 'reporters/skips.R:6:3' * regular skip (1): 'reporters/skips.R:2:3' --- [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 1 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 2 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 3 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 4 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 5 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 6 ] [ FAIL 0 | WARN 0 | SKIP 0 | PASS 7 ] # ParallelProgressReporter fails after max_fail tests v | F W S OK | Context - [ FAIL 0 | WARN 0 | SKIP 0 | PASS 0 ] Starting up... \ [ FAIL 11 | WARN 0 | SKIP 0 | PASS 0 ] @ reporters/fail-many -------------------------------------------------------------------------------- Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. 
Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Failure ('reporters/fail-many.R:3:5'): Example Expected FALSE to be TRUE. Differences: `actual`: FALSE `expected`: TRUE Maximum number of failures exceeded; quitting. i Increase this number with (e.g.) `testthat::set_max_fails(Inf)` testthat/tests/testthat/_snaps/test-files.md0000644000176200001440000000531415127731052021006 0ustar liggesusers# stops on failure Code test_dir(test_path("test_dir"), reporter = "silent") Condition Error: ! Test failures. # runs all tests and records output file context test nb failed skipped error warning passed 1 test-basic.R logical tests act as expected 2 0 FALSE FALSE 0 2 2 test-basic.R logical tests ignore attributes 2 0 FALSE FALSE 0 2 3 test-basic.R equality holds 2 0 FALSE FALSE 0 2 4 test-basic.R can't access variables from other tests 2 1 0 TRUE FALSE 0 0 5 test-basic.R can't access variables from other tests 1 1 0 FALSE FALSE 0 1 6 test-empty.R empty test 1 0 TRUE FALSE 0 0 7 test-empty.R empty test with error 0 0 FALSE TRUE 0 0 8 test-errors.R simple 0 0 FALSE TRUE 0 0 9 test-errors.R after one success 1 0 FALSE TRUE 0 1 10 test-errors.R after one failure 1 1 FALSE TRUE 0 0 11 test-errors.R in the test 0 0 FALSE TRUE 0 0 12 test-errors.R in expect_error 1 0 FALSE FALSE 0 1 13 test-failures.R just one failure 1 1 FALSE FALSE 0 0 14 test-failures.R one failure on two 2 1 FALSE FALSE 0 1 15 test-failures.R no failure 2 0 FALSE FALSE 0 2 16 test-helper.R helper test 1 0 FALSE FALSE 0 1 17 test-skip.R Skips skip 1 0 TRUE FALSE 0 0 # complains if no files Code test_dir(path) Condition Error in `test_dir()`: ! No test files found. # can control if failures generate errors Code test_error(stop_on_failure = TRUE) Condition Error: ! Test failures. # can control if warnings errors Code test_warning(stop_on_warning = TRUE) Condition Error: ! Tests generated warnings. 
# complains if file doesn't exist Code test_file("DOESNTEXIST") Condition Error in `test_file()`: ! `path` does not exist. testthat/tests/testthat/_snaps/expect-shape.md0000644000176200001440000000665115127731050021320 0ustar liggesusers# generates actionable failure message Code expect_length(x, 2) Condition Error: ! Expected `x` to have length 2. Actual length: 10. # expect_length validates its inputs Code expect_length(1:5, "a") Condition Error in `expect_length()`: ! `n` must be a whole number, not the string "a". # dim compared correctly Code expect_shape(matrix(nrow = 6, ncol = 3), dim = c(6L, 2L)) Condition Error: ! Expected `matrix(nrow = 6, ncol = 3)` to have dim (6, 2). Actual dim: (6, 3). --- Code expect_shape(matrix(nrow = 6, ncol = 3), dim = c(7L, 3L)) Condition Error: ! Expected `matrix(nrow = 6, ncol = 3)` to have dim (7, 3). Actual dim: (6, 3). --- Code expect_shape(array(dim = 1:3), dim = 1:2) Condition Error: ! Expected `array(dim = 1:3)` to have 2 dimensions. Actual dimensions: 3. --- Code expect_shape(array(dim = 1:3), dim = 1:4) Condition Error: ! Expected `array(dim = 1:3)` to have 4 dimensions. Actual dimensions: 3. # nrow compared correctly Code expect_shape(matrix(nrow = 5, ncol = 5), nrow = 6L) Condition Error: ! Expected `matrix(nrow = 5, ncol = 5)` to have 6 rows. Actual rows: 5. --- Code expect_shape(1, nrow = 1) Condition Error: ! Expected 1 to have dimensions. # ncol compared correctly Code expect_shape(matrix(nrow = 5, ncol = 5), ncol = 7L) Condition Error: ! Expected `matrix(nrow = 5, ncol = 5)` to have 7 columns. Actual columns: 5. --- Code expect_shape(array(1), ncol = 1) Condition Error: ! Expected `array(1)` to have two or more dimensions. --- Code expect_shape(array(integer()), ncol = 0L) Condition Error: ! Expected `array(integer())` to have two or more dimensions. # NA handling (e.g. dbplyr) Code expect_shape(x, nrow = 10L) Condition Error: ! Expected `x` to have 10 rows. Actual rows: NA. 
--- Code expect_shape(x, ncol = NA_integer_) Condition Error: ! Expected `x` to have NA columns. Actual columns: 10. --- Code expect_shape(x, dim = c(10L, NA_integer_)) Condition Error: ! Expected `x` to have dim (10, NA). Actual dim: (NA, 10). # checks inputs arguments, Code expect_shape(1:10) Condition Error in `expect_shape()`: ! One of `nrow`, `ncol`, or `dim` must be supplied. Code expect_shape(1:10, nrow = 1L, ncol = 2L) Condition Error in `expect_shape()`: ! Exactly one of `nrow`, `ncol`, or `dim` must be supplied. x `nrow` and `ncol` were supplied together. Code expect_shape(1:10, 2) Condition Error in `expect_shape()`: ! `...` must be empty. x Problematic argument: * ..1 = 2 i Did you forget to name an argument? Code expect_shape(array(1), nrow = "x") Condition Error in `expect_shape()`: ! `nrow` must be a whole number or `NA`, not the string "x". Code expect_shape(array(1), ncol = "x") Condition Error in `expect_shape()`: ! `ncol` must be a whole number or `NA`, not the string "x". Code expect_shape(array(1), dim = "x") Condition Error in `expect_shape()`: ! `dim` must be a numeric vector, not the string "x". 
testthat/tests/testthat/_snaps/reporter-location.md0000644000176200001440000000167015127731050022376 0ustar liggesusers# reporter as expected Start test: Success 'reporters/tests.R:6:3' [success] End test: Success Start test: Failure:1 'reporters/tests.R:13:3' [failure] End test: Failure:1 Start test: Failure:2a 'reporters/tests.R:17:8' [failure] End test: Failure:2a Start test: Error:1 'reporters/tests.R:24:3' [error] End test: Error:1 Start test: errors get tracebacks 'reporters/tests.R:30:8' [error] End test: errors get tracebacks Start test: explicit skips are reported 'reporters/tests.R:38:3' [skip] End test: explicit skips are reported Start test: empty tests are implicitly skipped 'reporters/tests.R:41:1' [skip] End test: empty tests are implicitly skipped Start test: warnings get backtraces 'reporters/tests.R:47:5' [warning] 'reporters/tests.R:45:1' [skip] End test: warnings get backtraces testthat/tests/testthat/_snaps/expect-match.md0000644000176200001440000001012515127731047021311 0ustar liggesusers# useful failure if empty Code expect_match(zero, "asdf") Condition Error: ! Expected `zero` to have at least one element. # useful failure messages for scalars Code expect_match(one, "asdf") Condition Error: ! Expected `one` to match regexp "asdf". Actual text: ✖ │ bcde --- Code expect_match(one, "asdf", fixed = TRUE) Condition Error: ! Expected `one` to match string "asdf". Actual text: ✖ │ bcde # useful failure messages for vectors Code expect_match(many, "a") Condition Error: ! Expected every element of `many` to match regexp "a". Actual text: ✔ │ a ✔ │ a ✖ │ b --- Code expect_match(many, "c", all = FALSE) Condition Error: ! Expected some element of `many` to match regexp "c". Actual text: ✖ │ a ✖ │ a ✖ │ b --- Code expect_match(paragraph, "paragraph") Condition Error: ! Expected every element of `paragraph` to match regexp "paragraph". Actual text: ✔ │ This is a multiline │ paragraph. ✖ │ Second element. --- Code expect_match(na, "NA") Condition Error: ! 
Expected every element of `na` to match regexp "NA". Actual text: ✔ │ NA ✖ │ # expect_match validates its inputs Code expect_match(1) Condition Error in `expect_match()`: ! `object` must be a character vector, not the number 1. Code expect_match("x", 1) Condition Error in `expect_match()`: ! `regexp` must be a single string, not the number 1. Code expect_match("x", "x", fixed = 1) Condition Error in `expect_match()`: ! `fixed` must be `TRUE` or `FALSE`, not the number 1. Code expect_match("x", "x", perl = 1) Condition Error in `expect_match()`: ! `perl` must be `TRUE` or `FALSE`, not the number 1. Code expect_match("x", "x", all = 1) Condition Error in `expect_match()`: ! `all` must be `TRUE` or `FALSE`, not the number 1. # expect_no_match validates its inputs Code expect_no_match(1, "x") Condition Error in `expect_no_match()`: ! `object` must be a character vector, not the number 1. Code expect_no_match("x", 1) Condition Error in `expect_no_match()`: ! `regexp` must be a single string, not the number 1. Code expect_no_match("x", "x", fixed = 1) Condition Error in `expect_no_match()`: ! `fixed` must be `TRUE` or `FALSE`, not the number 1. Code expect_no_match("x", "x", perl = 1) Condition Error in `expect_no_match()`: ! `perl` must be `TRUE` or `FALSE`, not the number 1. Code expect_no_match("x", "x", all = 1) Condition Error in `expect_no_match()`: ! `all` must be `TRUE` or `FALSE`, not the number 1. # expect_no_match works Code expect_no_match(x, "e*", fixed = TRUE) Condition Error: ! Expected `x` not to match string "e*". Actual text: x | te*st --- Code expect_no_match(x, "TEST", ignore.case = TRUE) Condition Error: ! Expected `x` not to match regexp "TEST". Actual text: x | test # show_text() shows success and failure Code base::writeLines(show_text(c("a", "b"), c(TRUE, FALSE))) Output ✔ │ a ✖ │ b # show_text() truncates values and lines Code base::writeLines(show_text(lines, max_lines = 3)) Output ✔ │ a │ b │ ... ... and 8 more. 
Code base::writeLines(show_text(lines, max_items = 3)) Output ✔ │ a │ b │ c ✔ │ d │ e │ f ✔ │ g │ h │ i ... and 6 more. Code base::writeLines(show_text(lines, max_items = 2, max_lines = 4)) Output ✔ │ a │ b │ c ✔ │ d │ ... ... and 8 more. testthat/tests/testthat/_snaps/mock2-helpers.md0000644000176200001440000000035215127731050021375 0ustar liggesusers# mock_output_sequence() works Code mocked_sequence() Condition Error in `mocked_sequence()`: ! Can't find value for 4th iteration. i `...` has only 3 values. i You can set `recycle` to `TRUE`. testthat/tests/testthat/_snaps/reporter-silent.md0000644000176200001440000000010715127731050022056 0ustar liggesusers# captures expectations; doesn't produce any output --- 9 testthat/tests/testthat/test-parallel-stdout.R0000644000176200001440000000140215054053615021331 0ustar liggesuserstest_that("stdout/stderr in parallel code", { skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") assemble_msgs <- function(txt, test_name) { prefix <- paste0("> ", test_name, ": ") parts <- sub( prefix, "", grep(prefix, out, fixed = TRUE, value = TRUE), fixed = TRUE ) paste(parts, collapse = "") } for (reporter in c("summary", "progress")) { out <- capture.output(suppressMessages(testthat::test_local( test_path("test-parallel", "stdout"), reporter = reporter ))) msg2 <- assemble_msgs(out, "test-stdout-2.R") expect_match(msg2, "This is a message!", fixed = TRUE) msg3 <- assemble_msgs(out, "test-stdout-3.R") expect_match(msg3, "[1] 1 2 3", fixed = TRUE) } }) testthat/tests/testthat/reporters/0000755000176200001440000000000015130237654017147 5ustar liggesuserstestthat/tests/testthat/reporters/error-setup.R0000644000176200001440000000013714463002164021554 0ustar liggesusersf <- function() g() g <- function() h() h <- function() stop("!") local_edition(2) setup(f()) testthat/tests/testthat/reporters/context.R0000644000176200001440000000014014313315331020740 0ustar liggesuserstestthat:::local_edition(2) context("my context") test_that("a 
test", { expect_true(TRUE) }) testthat/tests/testthat/reporters/nested.R0000644000176200001440000000040015056632045020546 0ustar liggesusersdescribe("succeeded", { it("succeeded-1", expect_true(TRUE)) it("succeeded-2", expect_true(TRUE)) }) describe("failed then succeeded", { it("failed-1", expect_true(FALSE)) it("failed-2", expect_true(FALSE)) it("succeeded", expect_true(TRUE)) }) testthat/tests/testthat/reporters/state-change.R0000644000176200001440000000007715040747540021641 0ustar liggesuserstest_that("options", { options(x = 1) expect_true(TRUE) }) testthat/tests/testthat/reporters/skips.R0000644000176200001440000000021114313315331020404 0ustar liggesuserstest_that("regular skip", { skip("regular skip") }) test_that("skip with details", { skip("longer skip:\nthis is what happened") }) testthat/tests/testthat/reporters/backtraces.R0000644000176200001440000000332515104634254021375 0ustar liggesuserstest_that("errors thrown at block level are entraced", { f <- function() g() g <- function() stop("foo") f() }) test_that("errors thrown from a quasi-labelled argument are entraced", { foo <- function() stop("foo") expect_s3_class(foo(), "foo") }) test_that("errors thrown from a quasi-labelled argument are entraced (deep case)", { foo <- function() stop("foo") f <- function() g() g <- function() expect_s3_class(foo(), "foo") expect_s3_class(f(), "foo") }) test_that("errors thrown from a quasi-labelled argument are entraced (deep deep case)", { foo <- function() bar() bar <- function() stop("foobar") f <- function() g() g <- function() expect_s3_class(foo(), "foo") f() }) test_that("failed expect_error() prints a backtrace", { f <- function() signaller() signaller <- function() stop("bar") expect_error(f(), "foo") }) test_that("Errors are inspected with `conditionMessage()`", { rlang::local_bindings( .env = globalenv(), conditionMessage.foobar = function(...) 
"dispatched" ) rlang::abort("Wrong message", "foobar") }) test_that("also get backtraces for warnings", { foo <- function() bar() bar <- function() warning("foobar", call. = FALSE) foo() expect_true(TRUE) }) test_that("deep stacks are shown", { f <- function(x) { if (x > 0) f(x - 1) else stop("This is deep") } f(25) }) test_that("errors in snapshots get useful backtraces", { f <- function() g() g <- function() h() h <- function() stop("!") expect_snapshot(f()) }) # Expectations ---------------------------------------------------------------- f <- function() g() g <- function() h() h <- function() expect_true(FALSE) f() test_that("nested expectations get backtraces", { f() }) testthat/tests/testthat/reporters/fail.R0000644000176200001440000000020115047715224020177 0ustar liggesuserstest_that("two failures", { expect_true(FALSE) expect_false(TRUE) }) test_that("another failure", { expect_true(FALSE) }) testthat/tests/testthat/reporters/fail-many.R0000644000176200001440000000011114313315331021127 0ustar liggesuserstest_that("Example", { for (i in 1:11) { expect_true(FALSE) } }) testthat/tests/testthat/reporters/tests.R0000644000176200001440000000121715072252215020430 0ustar liggesuserslocal_edition(2) context("Successes") test_that("Success", { pass() }) context("Failures") test_that("Failure:1", { x <- FALSE expect_true(x) }) test_that("Failure:2a", { f <- function() expect_true(FALSE) f() }) context("Errors") test_that("Error:1", { stop("stop") }) test_that("errors get tracebacks", { f <- function() g() g <- function() h() h <- function() stop("!") f() }) context("Skips") test_that("explicit skips are reported", { skip("skip") }) test_that("empty tests are implicitly skipped", {}) context("Warnings") test_that("warnings get backtraces", { f <- function() { warning("def") } f() }) testthat/tests/testthat/reporters/long-test.R0000644000176200001440000000022114313315331021170 0ustar liggesuserstest_that("That very long test messages are not truncated because they 
contain useful information that you probably want to read", { fail() }) testthat/tests/testthat/reporters/successes.R0000644000176200001440000000023414313315331021260 0ustar liggesuserstest_that("two successes", { expect_true(TRUE) expect_true(TRUE) }) test_that("five more successes", { for (i in 1:5) { expect_true(TRUE) } }) testthat/tests/testthat/test-expect-self-test.R0000644000176200001440000000601015127554030021407 0ustar liggesuserstest_that("expect_failure() requires 1 failure and zero successes", { expect_success(expect_failure(fail())) expect_failure(expect_failure({})) expect_failure(expect_failure(pass())) expect_failure(expect_failure({ pass() fail() })) expect_failure(expect_failure({ fail() # Following succeed/fail are never reached pass() fail() })) }) test_that("expect_failure() can optionally match message", { expect_success(expect_failure(fail("apple"), "apple")) expect_failure(expect_failure(fail("apple"), "banana")) }) test_that("expect_failure() generates a useful error messages", { expect_no_failure <- function() {} expect_many_failures <- function() { fail() fail() } expect_has_success <- function() { fail() pass() } expect_both_wrong <- function() { pass() } expect_failure_foo <- function() fail("foo") expect_snapshot_failure({ expect_failure(expect_no_failure()) expect_failure(expect_many_failures()) expect_failure(expect_has_success()) expect_failure(expect_both_wrong()) expect_failure(expect_failure_foo(), "bar") }) }) test_that("expect_success() requires 1 success and zero failures", { expect_success(expect_success(pass())) expect_failure(expect_success({})) expect_failure(expect_success(fail())) expect_failure(expect_success({ pass() fail() })) expect_failure(expect_success({ pass() pass() })) }) test_that("errors in expect_success bubble up", { expect_snapshot(expect_success(abort("error")), error = TRUE) }) test_that("show_failure", { expect_null(show_failure(NULL)) expect_snapshot(show_failure(expect_true(FALSE))) }) 
test_that("expect_success() generates a useful error messages", { expect_no_success <- function() {} expect_many_successes <- function() { pass() pass() } expect_has_failure <- function() { fail() pass() } expect_both_wrong <- function() { fail() } expect_snapshot_failure({ expect_success(expect_no_success()) expect_success(expect_many_successes()) expect_success(expect_has_failure()) expect_success(expect_both_wrong()) }) }) test_that("can count successes and failures", { status <- capture_success_failure({}) expect_equal(status$n_success, 0) expect_equal(status$n_failure, 0) status <- capture_success_failure({ pass() pass() fail() }) expect_equal(status$n_success, 2) expect_equal(status$n_failure, 1) # No code run after first fail status <- capture_success_failure({ pass() fail() pass() fail() }) expect_equal(status$n_success, 2) expect_equal(status$n_failure, 2) }) test_that("expect_no are deprecated", { expect_snapshot({ expect_no_failure(pass()) expect_no_success(fail()) }) }) test_that("expect_no still work", { withr::local_options(lifecycle_verbosity = "quiet") expect_success(expect_no_failure(pass())) expect_failure(expect_no_failure(fail())) expect_success(expect_no_success(fail())) expect_failure(expect_no_success(pass())) }) testthat/tests/testthat/test-expect-reference.R0000644000176200001440000000043215054145645021450 0ustar liggesuserstest_that("succeeds only when same object", { local_edition(2) x <- y <- 1 expect_success(expect_reference(x, y)) expect_snapshot_failure(expect_reference(x, 1)) }) test_that("returns value", { local_edition(2) one <- 1 expect_equal(expect_reference(one, one), 1) }) testthat/tests/testthat/test-examples.R0000644000176200001440000000045315054053615020040 0ustar liggesuserstest_that("test_examples works with installed packages", { local_mocked_bindings(test_rd = identity) expect_true(length(test_examples()) > 1) }) test_that("test_examples fails if no examples", { withr::local_envvar(TESTTHAT_PKG = "") 
expect_snapshot(error = TRUE, test_examples("asdf")) }) testthat/tests/testthat/helper-assign.R0000644000176200001440000000002015104421434017766 0ustar liggesusersabcdefghi <- 10 testthat/tests/testthat/test-teardown.R0000644000176200001440000000133015047715224020043 0ustar liggesuserstest_that("teardown adds to queue", { local_edition(2) withr::defer({ teardown_reset() }) expect_length(file_teardown_env$queue, 0) teardown({}) expect_length(file_teardown_env$queue, 1) teardown({}) expect_length(file_teardown_env$queue, 2) }) test_that("teardowns runs in order", { local_edition(2) withr::defer(teardown_reset()) a <- 1 teardown(a <<- 2) teardown(a <<- 3) expect_length(file_teardown_env$queue, 2) teardown_run() expect_equal(a, 3) expect_length(file_teardown_env$queue, 0) }) test_that("teardown run after tests complete", { test_file(test_path("test-teardown/test-teardown.R"), "silent") expect_false(file.exists(test_path("test-teardown/teardown.txt"))) }) testthat/tests/testthat/test-reporter-check.R0000644000176200001440000000263315127554030021137 0ustar liggesuserstest_that("basic report works", { withr::defer(unlink(test_path("testthat-problems.rds"))) expect_snapshot_reporter(CheckReporter$new()) rds <- test_path("testthat-problems.rds") expect_true(file.exists(rds)) }) test_that("doesn't truncate long lines", { withr::defer(unlink(test_path("testthat-problems.rds"))) expect_snapshot_reporter( CheckReporter$new(), test_path("reporters/long-test.R") ) }) test_that("always shows summary", { file.create(test_path("testthat-problems.rds")) expect_snapshot_reporter( CheckReporter$new(), test_path("reporters/successes.R") ) # and cleans up testthat-problems expect_false(file.exists(test_path("testthat-problems.rds"))) }) test_that("shows warnings when not on CRAN", { withr::defer(unlink(test_path("testthat-problems.rds"))) withr::local_options("NOT_CRAN" = "true") expect_snapshot_reporter(CheckReporter$new(), test_path("reporters/tests.R")) }) test_that("generates 
informative snapshot hints", { withr::local_envvar(GITHUB_ACTIONS = "false", CI = "false") expect_snapshot(base::writeLines(snapshot_check_hint())) withr::local_envvar(CI = "true") expect_snapshot(base::writeLines(snapshot_check_hint())) withr::local_envvar( GITHUB_ACTIONS = "true", GITHUB_REPOSITORY = "r-lib/testthat", GITHUB_RUN_ID = "123", GITHUB_JOB = "R-CMD-check" ) expect_snapshot(base::writeLines(snapshot_check_hint())) }) testthat/tests/testthat/test-quasi-label.R0000644000176200001440000000447315072252215020424 0ustar liggesuserstest_that("atomic scalars deparsed to single values", { expect_equal(expr_label(NULL), "NULL") expect_equal(expr_label(TRUE), "TRUE") expect_equal(expr_label(1L), "1L") expect_equal(expr_label(1), "1") expect_equal(expr_label("a"), '"a"') }) test_that("symbols are quoted", { expect_equal(expr_label(quote(a)), "`a`") }) test_that("missing arguments are propagated", { x <- list(missing = missing_arg()) expect_snapshot_failure(expect_null(x$missing)) }) test_that("is_call_infix() handles complex calls (#1472)", { expect_false(is_call_infix(quote( base::any( c( veryyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy_long_name = TRUE ), na.rm = TRUE ) ))) withr::local_envvar( "_R_CHECK_LENGTH_1_LOGIC2_" = "TRUE", ) expect_true( base::any( c( veryyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy_long_name = TRUE ), na.rm = TRUE ) ) }) test_that("long vectors get ...", { long <- "123456789_123456789_123456789_123456789_123456789_123456789_" expect_equal( expr_label(c(long, long)), paste0('c("', long, '", ...)') ) }) test_that("produces useful summaries for long calls", { expect_snapshot({ expr_label(quote(foo( a = "this is a long argument", b = "this is a long argument", c = "this is a long argument" ))) expr_label(quote( arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg )) expr_label(quote( arg + (arg + arg + arg + arg + arg + arg + arg + arg + arg + arg + arg) )) expr_label(quote(function(a, b, c) { a + b + c 
})) }) }) test_that("other inlined other objects are deparsed", { expect_equal(expr_label(c(1, 2, 3)), "c(1, 2, 3)") expect_equal(expr_label(list(1, 2, 3)), "list(1, 2, 3)") expect_equal( expr_label(1:100 + 0), "c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, ...)" ) }) test_that("labelling compound {} expression gives single string", { out <- expr_label(quote({ 1 + 2 })) expect_length(out, 1) expect_type(out, "character") }) test_that("can label multiline functions", { expect_equal( expr_label(quote(function(x, y) {})), "`function(x, y) ...`" ) }) test_that("informative error for missing arg", { expect_snapshot(error = TRUE, expect_equal()) }) testthat/tests/testthat/test-parallel-teardown.R0000644000176200001440000000104215111027201021614 0ustar liggesuserstest_that("teardown error", { skip_on_cran() skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( capture.output(suppressMessages(testthat::test_local( test_path("test-parallel", "teardown"), reporter = "summary" ))), error = function(e) e ) expect_s3_class(err$parent, "callr_error") expect_match( err$message, "At least one parallel worker failed to run teardown" ) expect_match( err$parent$parent$parent$message, "Error in teardown", fixed = TRUE ) }) testthat/tests/testthat/test-verify-unicode-false.txt0000644000176200001440000000006715127731052022660 0ustar liggesusers> cat(cli::symbol$info, cli::symbol$cross, "\n") i x testthat/tests/testthat/test-reporter-junit.R0000644000176200001440000000260015047715224021212 0ustar liggesuserstest_that("reporter doesn't change without warning", { skip_if_not_installed("xml2") expect_snapshot_reporter(JunitReporterMock$new()) }) test_that("permit Java-style class names", { class <- "package_name_or_domain.ClassName" expect_equal(classnameOK(class), class) }) test_that("ANSI escapes are stripped from all user text in XML", { skip_if_not_installed("xml2") tmp <- withr::local_tempfile(fileext = ".xml") reporter <- 
JunitReporterMock$new(file = tmp) reporter$start_reporter() text_with_ansi <- "\033[33mFirst line\033[0m\nSecond line" reporter$start_context("c") reporter$start_test("c", "t") reporter$add_result("c", "t", new_expectation("error", text_with_ansi)) reporter$add_result("c", "t", new_expectation("failure", text_with_ansi)) reporter$add_result("c", "t", new_expectation("skip", text_with_ansi)) reporter$end_test() reporter$end_context() reporter$end_reporter() expect_no_error(xml2::read_xml(tmp)) }) test_that("warnings outside context don't cause xml_add_child errors", { skip_if_not_installed("xml2") tmp <- withr::local_tempfile(fileext = ".xml") reporter <- JunitReporterMock$new(file = tmp) reporter$start_reporter() # This would previously fail with "no applicable method for 'xml_add_child'" expect_no_error({ reporter$add_result(NULL, "test", new_expectation("warning", "test")) }) reporter$end_reporter() }) testthat/tests/testthat/test-reporter-location.R0000644000176200001440000000013215053661631021666 0ustar liggesuserstest_that("reporter as expected", { expect_snapshot_reporter(LocationReporter$new()) }) testthat/tests/testthat/test-snapshot-serialize.R0000644000176200001440000000201315047715224022043 0ustar liggesuserstest_that("single test case can roundtrip", { x <- list(test = '[1] "x"') x_snap <- snap_to_md(x) x_lines <- strsplit(x_snap, "\n")[[1]] y <- snap_from_md(x_lines) expect_equal(x, y) }) test_that("multiple tests can roundtrip", { x <- list(foo = c("a", "b"), bar = "d", baz = letters[1:3]) x_snap <- snap_to_md(x) x_lines <- strsplit(x_snap, "\n")[[1]] y <- snap_from_md(x_lines) expect_equal(x, y) }) test_that("snapshots always use \n", { path <- withr::local_tempfile() x <- list(foo = c("a", "b"), bar = "d", baz = letters[1:3]) write_snaps(x, path) snap <- brio::read_file(path) has_cr <- grepl("\r", snap, fixed = TRUE) expect_equal(has_cr, FALSE) }) test_that("snap_from_md handles missing final newlines", { one_newline <- withr::local_tempfile( 
fileext = ".md", lines = c( "# test_case", "", "result1", "", "---", "", "result2" ) ) expect_equal( read_snaps(one_newline), list(test_case = c("result1", "result2")) ) }) testthat/tests/testthat/width-80.txt0000644000176200001440000000066415127731047017236 0ustar liggesusers [1] 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 [19] 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 [37] 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 [55] 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 [73] 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 [91] 91 92 93 94 95 96 97 98 99 100 testthat/tests/testthat/test-deprec-condition.R0000644000176200001440000000057315077746012021460 0ustar liggesuserstest_that("is_informative_error is defunct", { expect_snapshot(is_informative_error(TRUE), error = TRUE) }) test_that("capture_warnings can ignore deprecation warnings", { foo <- function() { lifecycle::deprecate_warn("1.0.0", "foo()") } expect_warning( expect_equal(capture_warnings(foo(), TRUE), character()), class = "lifecycle_warning_deprecated" ) }) testthat/tests/testthat/test-bare.R0000644000176200001440000000037615044763502017141 0ustar liggesusersexpect_equal(2, 2) expect_true(TRUE) expect_error(stop("!")) stopifnot( tryCatch( expect_true(TRUE), expectation_failure = function(e) FALSE ) ) stopifnot( tryCatch( expect_true(FALSE), expectation_failure = function(e) TRUE ) ) testthat/tests/testthat/test-parallel-crash.R0000644000176200001440000000102015047715224021106 0ustar liggesuserstest_that("crash", { skip_on_cran() skip_on_covr() skip_if_not(getRversion() >= "4.4.0") withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") pkg <- test_path("test-parallel", "crash") err <- callr::r( function() { tryCatch( testthat::test_local( ".", reporter = "summary", stop_on_failure = FALSE ), error = function(e) e ) }, wd = pkg ) expect_s3_class(err, "testthat_process_error") expect_equal(err$test_file, "test-crash-3.R") }) 
testthat/tests/testthat/test-parallel-startup.R0000644000176200001440000000062615047715224021523 0ustar liggesuserstest_that("startup error", { skip_on_covr() withr::local_envvar(TESTTHAT_PARALLEL = "TRUE") err <- tryCatch( capture.output(suppressMessages(testthat::test_local( test_path("test-parallel", "startup"), reporter = "summary" ))), error = function(e) e ) expect_s3_class(err, "testthat_process_error") expect_match(conditionMessage(err), "This will fail", fixed = TRUE) }) testthat/tests/testthat/setup.R0000644000176200001440000000037215045447370016412 0ustar liggesuserswriteLines( "If you see me, something has gone wrong with old-school teardown", "DELETE-ME" ) writeLines( "If you see me, something has gone wrong with new-school teardown", "DELETE-ME-2" ) withr::defer(unlink("DELETE-ME-2"), teardown_env()) testthat/tests/testthat/test-expect-no-condition.R0000644000176200001440000000452515054145645022121 0ustar liggesuserstest_that("expect_no_* conditions behave as expected", { # base R expect_snapshot_failure(expect_no_error(stop("error"))) expect_snapshot_failure(expect_no_warning(warning("warning"))) expect_snapshot_failure(expect_no_message(message("message"))) # rlang equivalents expect_snapshot_failure(expect_no_error(abort("error"))) expect_snapshot_failure(expect_no_warning(warn("warning"))) expect_snapshot_failure(expect_no_message(inform("message"))) }) test_that("expect_no_* pass with pure code", { expect_success(out <- expect_no_error(1)) expect_equal(out, 1) expect_success(expect_no_warning(1)) expect_success(expect_no_message(1)) expect_success(expect_no_condition(1)) }) test_that("expect_no_ continues execution", { b <- 1 expect_failure(expect_no_warning({ warning("x") b <- 2 })) expect_equal(b, 2) }) test_that("expect_no_* don't emit success when they fail", { expect_snapshot_failure(expect_no_error(stop("!"))) }) test_that("capture correct trace_env (#1994)", { status <- capture_success_failure( expect_warning(expect_error(stop("oops"))) ) 
expect_equal(status$n_success, 1) # from expect_error() expect_equal(status$n_failure, 1) # from expect_warning() }) test_that("unmatched conditions bubble up", { expect_error(expect_no_error(abort("foo"), message = "bar"), "foo") expect_warning(expect_no_warning(warn("foo"), message = "bar"), "foo") expect_message(expect_no_message(inform("foo"), message = "bar"), "foo") expect_condition( expect_no_condition(signal("foo", "x"), message = "bar"), "foo" ) }) test_that("only matches conditions of specified type", { foo <- function() { warn("This is a problem!", class = "test") } expect_warning(expect_no_error(foo(), class = "test"), class = "test") }) test_that("matched conditions give informative message", { foo <- function() { warn("This is a problem!", class = "test") } expect_snapshot(error = TRUE, { expect_no_warning(foo()) expect_no_warning(foo(), message = "problem") expect_no_warning(foo(), class = "test") expect_no_warning(foo(), message = "problem", class = "test") }) }) test_that("deprecations always bubble up", { foo <- function() { lifecycle::deprecate_warn("1.0.0", "foo()") } expect_warning( expect_no_warning(foo()), class = "lifecycle_warning_deprecated" ) }) testthat/tests/testthat/test-make-expectation.R0000644000176200001440000000035115047715224021460 0ustar liggesuserstest_that("make_expectation returns and prints expectation", { x <- 1:5 out <- capture_output( expect_equal(make_expectation(x), bquote(expect_equal(x, .(1:5)))) ) expect_equal( out, "expect_equal(x, 1:5)" ) }) testthat/tests/testthat.R0000644000176200001440000000005114164710003015227 0ustar liggesuserslibrary(testthat) test_check("testthat") testthat/MD50000644000176200001440000010757215130664352012443 0ustar liggesusersb4fc1f0285bc778e3302b56f267be14a *DESCRIPTION d0b909bd833cd58ea0430eda333b6447 *LICENSE 3d1cd02b8926010c145525fc51bf52f4 *NAMESPACE 4b6d8f7ea8dcb001e0b59ff93d213c20 *NEWS.md 8eba1b724e9398d8cf62c8804581731a *R/auto-test.R 8e5500caed8e09622130d1d83b1bd094 
*R/capture-output.R 30cfe0f41a9acad796ddcd1fede3bbac *R/colour-text.R af2e6bc24f35ac9c6758b6df32bd0a88 *R/compare.R 26530fa797e63d4c873c36cab56e7fb5 *R/context.R 38ff301ee323b94e16b74661ac25e577 *R/deprec-condition.R 0fffe9159bfd9d6166452761eefd4837 *R/describe.R d8a5bc59c95a5cbf2381414262918ca0 *R/edition.R 7cc18a90780f826141f1350f911cac5b *R/evaluate-promise.R 547ef939875446ef7ec4d7ebff949efb *R/examples.R 1a8319a96077309f94840ffca235f8e3 *R/expect-all.R f6735225ddb77dd878ae9d7ada4c934c *R/expect-comparison.R d8b477139ad0f442dde601e5e5cef7cc *R/expect-condition.R 238cd32627f701f2171bb23aea4ddb3f *R/expect-constant.R 732e85a2b4bf34170fd3c19f5e0f8d13 *R/expect-equality.R 84bfe06e696882920e64161b5246e6fc *R/expect-inheritance.R f3adef17441b6992373d16ccd93ec201 *R/expect-invisible.R 5659655ec77ac48e8a715cbb184147e1 *R/expect-known.R 4be5cfe03634524340012a4683ef0ac9 *R/expect-match.R 075c9fa698d49113ce2e21e58292961e *R/expect-named.R 839ffccf1a1860c3d9e5664271814d50 *R/expect-no-condition.R f22d22af627bd66b5209fcb7627d5811 *R/expect-output.R 03876aa3739b2348d4cfd60cc0785493 *R/expect-reference.R 3687be82ee6261195160906c5d83e280 *R/expect-self-test.R 7c536d4634e3c5d29a0142b23cf63ac4 *R/expect-setequal.R a661c2251f81cb4804a5f52f6c5cd9fe *R/expect-shape.R 3be573a504c57cdf0d1725cae64bfd1a *R/expect-silent.R 2bfa335b9a886c8f4190d9e73b6d961a *R/expect-that.R 12367c9ed0a636f4313b685ac71fa40a *R/expect-vector.R e309187d4686d37056d8e4773cdba2dd *R/expectation.R 825bf543bdedbdaed8056c01b8286d1a *R/extract.R 70258ae742d15ec71336f7a42bf23956 *R/import-standalone-obj-type.R 754e065247c6c0636c3fe8440af3bfc9 *R/import-standalone-purrr.R 2bc7dec97ccacd882cc078257cb591bb *R/import-standalone-types-check.R 2dc20d9271f7b24893217ede89caa7a6 *R/local.R a2a562b4b752424a7a66c1bd11e8d718 *R/make-expectation.R d29216371f88e053ef47606f27af1367 *R/mock-oo.R fe564fb641e5a4b0f6a7ec6ce1a494d0 *R/mock.R 4c21d67d522ee92f9764308395d337c0 *R/mock2-helpers.R adf64f1cbd651321c812c96c80aa8718 *R/mock2.R 
45a3af286bdd5354f5b1c9910007ffe3 *R/old-school.R e7308de1f5265e31888960845e4975a0 *R/otel.R 070d7d5fce369aacc9c5e3aa3f66a7ce *R/parallel-config.R e802420997c4ec208a887768b47630fb *R/parallel-taskq.R 2d6af03340ec935f7b75aa925fdf4d12 *R/parallel.R f73b93068bc5d038917e22705127091c *R/praise.R 2dce5c099857e7c08eb6f6977b807e00 *R/quasi-label.R 4500222899616c88c01c202d3a2b1c80 *R/reporter-check.R 652cdf78a5873f84b942275f65be28aa *R/reporter-debug.R 88afafc5634e57671aa9c1212e98390e *R/reporter-fail.R 6f1115724c7bb77c07fa50cad4e08d35 *R/reporter-junit.R 175ff4f5c78813797d415c8b6b5c38a8 *R/reporter-list.R 83fac8567512d29414649427dcd27075 *R/reporter-llm.R 2fbff9c3d2c44e2bdade9e0ce3bf5fe8 *R/reporter-location.R 569ca89a3f79fde1ecacf450e7cb2564 *R/reporter-minimal.R db7e70cc445d7812d8da1996a481ed10 *R/reporter-multi.R b890f3b8b6cb834974cc9625c15406b0 *R/reporter-progress.R 65c7e9b2531ad9cfe91eb40fcb07475a *R/reporter-rstudio.R 7fb08e1ef86219b2efc5369efbbfbe11 *R/reporter-silent.R d0e1905fd5347cf103b216d0f860748e *R/reporter-slow.R 66be7f2e2705a2acd98d054662bc5419 *R/reporter-stop.R 7a912e85965ca85ac694080cc4658748 *R/reporter-summary.R 2c111b1127e3b3a2f075a9e06f889f29 *R/reporter-tap.R 3e5ec99bfd6aeee7c2841730bad98c48 *R/reporter-teamcity.R 18b468f6f5ad256bd82ebeccabb1babf *R/reporter-timing.R a3a0f074f24b3142bd74d981d7ca3bde *R/reporter-zzz.R 14e27e9d923788c8f6e21c7a15abacd8 *R/reporter.R 4ec3c33b6092a158da61a39e034da1aa *R/skip.R 2dc5bd41e9ef2915c5ff076484b8bd09 *R/snapshot-cleanup.R aab0488c6025d75e40489884e212ad76 *R/snapshot-file-snaps.R 754541d18f537065c4b166d222b31505 *R/snapshot-file.R 23a474d682e58f8e9a7187dae4dd7e7d *R/snapshot-github.R 77e0fb7ec6be691e455a2be0f7fea977 *R/snapshot-manage.R 879bfe70f8ee94cad98beee9b937548e *R/snapshot-reporter-parallel.R 108ad8a222052e7c95dceb324a641f06 *R/snapshot-reporter.R 1343bc3fcca46995f6e53f17006e4433 *R/snapshot-serialize.R 5009efdcb1cafda9fc63d2d2abbea00d *R/snapshot-value.R 3857054936183093f4e9bdd822e2b718 *R/snapshot.R 
e390305f86acc560d72fbaeff9737ce6 *R/source.R 95d3de6ee127dbc6864f84cc55a61dc4 *R/srcrefs.R 96bd69a55783101b69067ca2e8cbab20 *R/stack.R 95c07e077d9321e55e82ccf09c574210 *R/teardown.R 2b21c80d79c2e7e7267cef3a08ecf347 *R/test-compiled-code.R 5c8e02665596da3b960254082456e2b9 *R/test-env.R 812267d1b1b45d6b845c90113d675db6 *R/test-example.R dd10afa06966da713c050c9ce82916a3 *R/test-files.R 184448fd613a0ac87d0c0ede08ecc253 *R/test-package.R 67f007158a86a957f09141c4034f59b2 *R/test-path.R e3da30f469dd1f8206ed4eb655fdb79e *R/test-state.R 88699e8c0d79cca6d2423c661b4cfb21 *R/test-that.R fdf67d7c812e2c83c3635f2ccbeecb3d *R/testthat-package.R a8d1228a26567eb1d49ff65476ddab67 *R/try-again.R 6eb3c2bea6feee757e9e7570b045a4c7 *R/utils.R dbd17db5d4cd1c1c087d829851687c78 *R/verify-output.R 92cf07e63a1717b010d982522fc5ad44 *R/watcher.R 183dee744b690bf8ea41a67bc505da69 *README.md 0feb712c3b6ff40fc542eb1ccc9967cf *build/vignette.rds f71b84e74c1e6b9d9e8d3a60324258a2 *inst/CITATION 22f6c9e5a18c097fc37060cda42813d0 *inst/doc/challenging-tests.R 5882704a5ce3db4271f901a685641476 *inst/doc/challenging-tests.Rmd 53a6d60a4861a5239da5625beea672e2 *inst/doc/challenging-tests.html 7974606a25f8bd4be631253479c1d700 *inst/doc/custom-expectation.R f6262fb833b8d17aa87f711050a13dbc *inst/doc/custom-expectation.Rmd 5047b6035f2e0ca8842db91bf0c761b5 *inst/doc/custom-expectation.html a421b22edc9a20bdbda52e8de7a343eb *inst/doc/mocking.R cb2ab7500571c5da4127bc8736bf2862 *inst/doc/mocking.Rmd bf23753df7420a1d918bd8b354179d8e *inst/doc/mocking.html d76c7a669f99acd6eb21ddc5529934fe *inst/doc/parallel.R c51bcdddbfee109ac065b1601e1fff43 *inst/doc/parallel.Rmd 15c69fb72083bfd41ecbe17401a78777 *inst/doc/parallel.html 65fca1138202398e366618ec1fc4e60f *inst/doc/skipping.R a4e543566b5aa47bc9fc4d4687e312ee *inst/doc/skipping.Rmd b780846d1ccf13423051610e0ba97c03 *inst/doc/skipping.html af924980be70fbf1a22d305000a43cb6 *inst/doc/snapshotting.R 7dfb290b89829b01c1711297ab3fe785 *inst/doc/snapshotting.Rmd 
da659651ec8d59759d09b9aff9636a6b *inst/doc/snapshotting.html 6387312637a22d4393dd2a79b4b68553 *inst/doc/special-files.R 97def4ca015ee8145ff7af69249b5e67 *inst/doc/special-files.Rmd dbd1ce2f7c9b10f6cb3f2385660b7989 *inst/doc/special-files.html f10ff1d50c36b374471639edb789b460 *inst/doc/test-fixtures.R 3926ff58fb28eb857060181fdc3656ad *inst/doc/test-fixtures.Rmd da78b29925701bbec6884d7917d3e0ef *inst/doc/test-fixtures.html 554146087fc852ba04ff0eab33006b5e *inst/doc/third-edition.R 2251710e467ca3b0578655b64509b686 *inst/doc/third-edition.Rmd 04ae6999318999c1c698cebd48423445 *inst/doc/third-edition.html 1ece0759f60e98193d53545648d19146 *inst/examples/test-failure.R 522cf134c79624117d579f2c4d18f42b *inst/examples/test-success.R 543392950ccc39ed50ebf26c75de0910 *inst/include/testthat.h 05f098fedb1fe85ee85de806f711779f *inst/include/testthat/testthat.h d337a6f13ed335d6e4b2258989df0c54 *inst/include/testthat/vendor/catch.h f481e7613929d4968a4a4c73031f7967 *inst/resources/catch-routine-registration.R f35a879e88834e75aee99fc9817547a0 *inst/resources/test-cpp.R 3332a4affe5b211b37b65a3d3311d9ba *inst/resources/test-example.cpp c2cb0c30e10da611f9fbe56aea52f7a9 *inst/resources/test-runner.cpp 10f4212c06d4fcddcab80b32903a2ae8 *man/CheckReporter.Rd 3d57c35e031121505768d62b371c3f0a *man/DebugReporter.Rd 4abf7813bdcd0466e3457957c5ad892c *man/FailReporter.Rd e20648f41672ec9d9b24ef885656e82e *man/JunitReporter.Rd 7c23ebe17c911b46063eaf99161042e0 *man/ListReporter.Rd 29bfcb6d94bbce63cb99025a8b271499 *man/LlmReporter.Rd 75cc5667031262f0384b1944cd5a1212 *man/LocationReporter.Rd f45628caf8ed7cad994784a22589f1f2 *man/MinimalReporter.Rd 707afe85bbb559fb50ac0fef3d460bf1 *man/MultiReporter.Rd c6ff24b7d3c4d8e88f79a411eff7cae9 *man/ProgressReporter.Rd 13124254fba5e9006652b324abde355d *man/RStudioReporter.Rd b69d7618c2d0df7db1e57ea8b2b1e02b *man/Reporter.Rd 0e9a61c094a02313595afe5cd41084c8 *man/SilentReporter.Rd 7efd0d12fff59fb9a0647582a19e9692 *man/SlowReporter.Rd 
5266fbeaf3ad7dfe011d021deee56ad6 *man/StopReporter.Rd deb3fea2b837bda82f87d1921ab2aad3 *man/SummaryReporter.Rd 71bffaca3f9f5166dee6581812a75946 *man/TapReporter.Rd 13d408d4e511e962e1b36273199848fd *man/TeamcityReporter.Rd 6f66988e119f4fd90c1931ac014f336f *man/auto_test.Rd 5adb8f905594c50b79b777c86a9b32da *man/capture_condition.Rd 6d79d954a3742bf9c545015fdc66ba43 *man/capture_output.Rd 28a2d50e7fbb9e4471bd73c56559c260 *man/compare.Rd ea1c54d24c6395c0f988d14b0ae40956 *man/compare_state.Rd 11a502873975800472f9a9494dab8e76 *man/comparison-expectations.Rd 814661cc0285f178d3e01259f3e4ce8f *man/context.Rd 3ffd0eedcd434635ea708cb81e763fe1 *man/context_start_file.Rd 0a9667e3b86387bb7933446e3653aac6 *man/default_reporter.Rd 9c745cb9f25278da5a962e0cc2d70cff *man/describe.Rd f2b7ae4da7936bcf6a6d13a161c451ae *man/dir_state.Rd 5a576459eaa65e0a5e7d6ecdab52a408 *man/equality-expectations.Rd 83120acf2b4d08657f53db8ec927e470 *man/evaluate_promise.Rd 2491c48bd603915995de834d9b0842dc *man/expect.Rd dd63eb613276b3c755efadfd863a8c2a *man/expect_all_equal.Rd 769bbe959c0248c944489a02c2166547 *man/expect_equivalent.Rd c962f6d2d97c81a03b4b0519d43d3f28 *man/expect_error.Rd b6a56e696e49eca77f806f2aeaaae61e *man/expect_invisible.Rd 7c8231c847b79916594714e8e7e74d65 *man/expect_is.Rd 0a75a6350dbafd57ee5e8c01beacd04d *man/expect_known_output.Rd ebbb773fd177d9fb179829ebdf9b7ae4 *man/expect_length.Rd c56df08d2028bd283bc3aa4fec3263e7 *man/expect_less_than.Rd a46a050ca28687b3570c26e1497a0a1a *man/expect_match.Rd 9a7117175363983d6809fa8f6c4821f6 *man/expect_named.Rd 35a9e1f1dcb330ffc9d263d5f18c71dc *man/expect_no_error.Rd 2502d6b0c2728823f4d17c166e970f39 *man/expect_no_success.Rd 968f66c28e20338368e5823e8ae52cd8 *man/expect_null.Rd 1fb689441cd8d5663720de79b6d1c659 *man/expect_output.Rd 8d8dd2a9865b0910ba4a199e06887f9d *man/expect_output_file.Rd 59b4317633949cf20cf2d78d0f25da49 *man/expect_reference.Rd 509f42280f0f257a235ea0ba6dab1ee8 *man/expect_setequal.Rd 13185e0405da870ad2cec9e4e592baf4 
*man/expect_silent.Rd 9811d5291b62865dbd2267772069ffaa *man/expect_snapshot.Rd fa164cfe2e22b44bdc8f04ca9af18a2d *man/expect_snapshot_file.Rd 0fa02b74cf6be9ac68c0f88be813c1e9 *man/expect_snapshot_output.Rd 90f5d84d5cf583f66f84aaed4eb04cd2 *man/expect_snapshot_value.Rd fc506de720caa4e75bdc46d05a1214af *man/expect_success.Rd b8ed330388b7689ecc975a7946e030c9 *man/expect_that.Rd adc37f469564fd2c4be7d6bff5111479 *man/expect_vector.Rd f7de871f8cff1a3d37f4a6cdb5bceef8 *man/expectation.Rd b2f6d39e15b2db07a9668623809c73c4 *man/extract_test.Rd 82af7adb1284d6505c9326e9ca7b316a *man/fail.Rd cb1e46f469cfbbbde29c8b5113e1d789 *man/figures/lifecycle-archived.svg c0d2e5a54f1fa4ff02bf9533079dd1f7 *man/figures/lifecycle-defunct.svg 391f696f961e28914508628a7af31b74 *man/figures/lifecycle-deprecated.svg 691b1eb2aec9e1bec96b79d11ba5e631 *man/figures/lifecycle-experimental.svg 952b59dc07b171b97d5d982924244f61 *man/figures/lifecycle-maturing.svg 27b879bf3677ea76e3991d56ab324081 *man/figures/lifecycle-questioning.svg 6902bbfaf963fbc4ed98b86bda80caa2 *man/figures/lifecycle-soft-deprecated.svg ed42e3fbd7cc30bc6ca8fa9b658e24a8 *man/figures/lifecycle-stable.svg bf2f1ad432ecccee3400afe533404113 *man/figures/lifecycle-superseded.svg 3c2e51376e674c859e42e70bc95b2b9c *man/figures/logo.png bb0e79b8bdc83b519a64be149499c48c *man/find_reporter.Rd e92deedee075ce21672aa7c6e81e3f5b *man/find_test_scripts.Rd 267e89763b0eb7a514ff930aa735b406 *man/inheritance-expectations.Rd 1bec47b13d94110b9b4f18e0684a9597 *man/is_informative_error.Rd ca91d976e00068605941629296c6327c *man/is_testing.Rd 23a360c334958d1a41f21768524bf3f2 *man/local_edition.Rd 484ef8f479fba3a0019b5794c9f4cdeb *man/local_mocked_bindings.Rd 4c1a92fc44ba7fd6363f07b2348c028d *man/local_mocked_r6_class.Rd 12ce4eb5e83cdbf489c761b3eeeb7348 *man/local_mocked_s3_method.Rd aa9a7d6bd465ed17fc59c1e5679b1581 *man/local_snapshotter.Rd 50fed2cedf8fa062cc671125fa72f390 *man/local_test_context.Rd 72081c5d88493c1808f7ac0ebf6b46f0 *man/local_test_directory.Rd 
f4a1ee7b94d59ef80a8e912cb7116dcc *man/logical-expectations.Rd 8aa87707c6a53a51dcc2a913a93154bd *man/make_expectation.Rd 93b6d2224d9f0878bcc44c4a3f04a2cf *man/mock_output_sequence.Rd 5fe2f28d6d3329a05937360bc9be5a7f *man/not.Rd 35508e8a9cc50be5772198d541ce6f1d *man/oldskool.Rd 1aea5f00707a4dd2406b0e97fca9ac8c *man/quasi_label.Rd b8f8a1cbf3a5afc81d1df65fa2f2df03 *man/reexports.Rd efdd9f36891d19bf8d083d569c6ab8d6 *man/reporter-accessors.Rd 99a203507a521441bd629594b424c0c2 *man/run_cpp_tests.Rd 6781411294bdf9c6e05c0e64585d8d42 *man/set_max_fails.Rd 23345f04b7e76ae301a0ee724e640e5a *man/set_state_inspector.Rd 281079c925790d5f90e9b115440487a5 *man/skip.Rd 035ea598e10258b49c9c50a260799074 *man/skip_on_travis.Rd 009035d144385cbcc5667f1341adc4fa *man/snapshot_accept.Rd 885df079d4ecb1ca79d85199548a1f16 *man/snapshot_download_gh.Rd 0515fd6c5571dd8b196cc8903705ee0a *man/source_file.Rd 7e2ec4a3e670a6cae0ae9a6fe08f51bd *man/succeed.Rd 8b7e53a2eb81a7448c7fc7d4e26c1623 *man/takes_less_than.Rd dfe3cdc79671f3b5f56be60ddf09fb35 *man/teardown.Rd da7c3d8b9499eba0e5b074d16d91cd07 *man/teardown_env.Rd 92f9d6fcef6ccfbc0c8b849c58bd9301 *man/test_dir.Rd 1069e33073c62ed24b08bd0f30dff578 *man/test_env.Rd 44da3653452f5d2f415cbe2d50366bca *man/test_examples.Rd b239b246844020b5b249c559f77d1d82 *man/test_file.Rd 3cdb800f2eaa70a2fd1db25ad9d92c90 *man/test_package.Rd 25c62f91815d41138a8575a3c81b79f8 *man/test_path.Rd 137a9b05ecf5970b6345a07f0aaf4533 *man/test_that.Rd f2d5b97613f93eb2487e015796f89387 *man/testthat-package.Rd 0bcfb93dcf04593de0b0a9120bc6ee23 *man/testthat_examples.Rd 79ee52da3640f0c553c36954613485f7 *man/testthat_results.Rd aa97db269c5450ec1894f3fe5b7971bd *man/testthat_tolerance.Rd 0307c6acc209355739d6149e3a621f76 *man/topic-name.Rd 466de136243b0d435937b05cdacb5bb0 *man/try_again.Rd ed93565626bbff53470a0f1af1b987e5 *man/use_catch.Rd dc4e504aee4132d8fcb986d8e5bbdf17 *man/verify_output.Rd bbfb2652963f2f8d61c5168626133b22 *man/watch.Rd 9205e235994ccec1bdc882ee19dfbae0 *man/with_mock.Rd 
d09410b8ca2729ce83dca3d0d4359f3e *src/Makevars d09410b8ca2729ce83dca3d0d4359f3e *src/Makevars.win 509e8303a810a1af744a1123b397586c *src/init.c 69850efbfde51c34daccd28048c41e87 *src/test-catch.cpp 3332a4affe5b211b37b65a3d3311d9ba *src/test-example.cpp c2cb0c30e10da611f9fbe56aea52f7a9 *src/test-runner.cpp 86632b27463fc130630b37a9968cec06 *tests/test-catch.R 0eb375d3daee2350d0f7f39aa5b0f1a6 *tests/testthat.R 72787795c4ab164a3c23d8eb90ccbef4 *tests/testthat/_snaps/R4.1/snapshot-file/version.txt b9caae6804a221e5be304f5274cfd5eb *tests/testthat/_snaps/R4.1/snapshot.md 1352cc8a749cacaafacdf2704bcc8192 *tests/testthat/_snaps/R4.2/snapshot-file/version.txt d18ee4d56ba68c8608c19201adb25951 *tests/testthat/_snaps/R4.2/snapshot.md 6652feeae3ce72c1ae747e6728cc5f0d *tests/testthat/_snaps/R4.3/snapshot-file/version.txt 661953de7fcbaf87ffc4ae0a398952d1 *tests/testthat/_snaps/R4.3/snapshot.md 3648669dda87ebe93b71dc4129f9ef4a *tests/testthat/_snaps/R4.4/snapshot-file/version.txt 9132ce9ee9ccc3aebadfa0e3fd522a26 *tests/testthat/_snaps/R4.4/snapshot.md ece51e3182e016269c143cd1d186327d *tests/testthat/_snaps/R4.5/snapshot-file/version.txt 997ebb0e0b6ad28bee689120acf2da1b *tests/testthat/_snaps/R4.5/snapshot.md 7bfb45cbc546ddef570ad5abd6a250c6 *tests/testthat/_snaps/R4.6/snapshot-file/version.txt 5304dca76ee96cb09afcdd178591fdad *tests/testthat/_snaps/R4.6/snapshot.md fff0b4681b0e803bd662ef49dcf4914f *tests/testthat/_snaps/deprec-condition.md 99c133177ff6171324f71a5417112c7d *tests/testthat/_snaps/describe.md 5818b38a0d16d1c0e2ee9bf9bffa369c *tests/testthat/_snaps/edition.md 6d3773108b0766a3df5411affbcda94c *tests/testthat/_snaps/examples.md 9faed24d648d03fcfbf0433285f3424f *tests/testthat/_snaps/expect-all.md 0df5d2c0e0cd655894da79808576a9e8 *tests/testthat/_snaps/expect-comparison.md dbe71eb3236a824e4e6fb4f926138c2e *tests/testthat/_snaps/expect-condition.md 23dd06314567c74ac37a2125631f002e *tests/testthat/_snaps/expect-constant.md 2ddadccde4f1c85e9ac8a3d80db62b57 
*tests/testthat/_snaps/expect-equality.md 692a9a5f2b86063bc09c83a97fe651c6 *tests/testthat/_snaps/expect-inheritance.md cf6cde7e724951aad1c2a67f6488c92e *tests/testthat/_snaps/expect-invisible.md 2b702f4fd66d2d139ff45f2a9ea2256d *tests/testthat/_snaps/expect-match.md 6b0883aa267267a11476c5fca754afad *tests/testthat/_snaps/expect-named.md aa8b27d7bea97b230c48a5a5b8641a0c *tests/testthat/_snaps/expect-no-condition.md 5f33a95af15876e12bf899e54a5187bf *tests/testthat/_snaps/expect-output.md a0324db7af294fd3531739c9b5ff4778 *tests/testthat/_snaps/expect-reference.md 21c438ac44393ab52f1525d543fddd53 *tests/testthat/_snaps/expect-self-test.md ffeecd9a92b10a2caf258df6fa78905d *tests/testthat/_snaps/expect-setequal.md b5020d6770a2f4203590318782005e64 *tests/testthat/_snaps/expect-shape.md 74e81f9177ee7e9fd184ca02b2eb2f37 *tests/testthat/_snaps/expect-silent.md 2ea95a171747d3c14ee75b1a643de645 *tests/testthat/_snaps/expect-vector.md e95a5dabb683d48b10fcd8384d2a4117 *tests/testthat/_snaps/expectation.md 2f2eb3d9800c5050d62f4a60953d9772 *tests/testthat/_snaps/extract.md 7087410598986fa8e8364dc8afd33d75 *tests/testthat/_snaps/local.md e5c6f4b989ea4c7e2f0987b35a82cce7 *tests/testthat/_snaps/mock-oo.md 1ebc1b6ed27fd3f8d2ba6d1050babc9a *tests/testthat/_snaps/mock.md c314db35527ac3db7e822861baceac7f *tests/testthat/_snaps/mock2-helpers.md bf3aa39d37c6be159ad9505ce3cbd333 *tests/testthat/_snaps/mock2.md d56577d5c51141a37912e35c77f36c5c *tests/testthat/_snaps/parallel.md 0349b83595d1b4e5abc885911b1b9119 *tests/testthat/_snaps/quasi-label.md 8cef6989d17554579db340ba0e9fa585 *tests/testthat/_snaps/reporter-check.md 5d0fe4d3bf721d4e3c3f341bb6c36953 *tests/testthat/_snaps/reporter-debug.md 35cc002c93b95c4fdd270b3bb8154f3e *tests/testthat/_snaps/reporter-junit.md 9eaaf9f1913bce4b1def9a1825a6963b *tests/testthat/_snaps/reporter-list.md b91241c481ef654baaaf82f86c787cd3 *tests/testthat/_snaps/reporter-llm.md 85999573c5b556660a046770a748b738 *tests/testthat/_snaps/reporter-location.md 
d18dd86f32e3226cfe5a295c88d1893c *tests/testthat/_snaps/reporter-minimal.md 0d15055137cb5f1a02f196e10aa45de0 *tests/testthat/_snaps/reporter-progress.md aac95eae5dbad582fae2c1663218e07a *tests/testthat/_snaps/reporter-rstudio.md d135a16952dcec53f28dbf91c25cb396 *tests/testthat/_snaps/reporter-silent.md a0489f1e35072991d52c0f34d2785a6a *tests/testthat/_snaps/reporter-slow.md d5077bd37e17a747c318c31d51d6da08 *tests/testthat/_snaps/reporter-stop.md 4a71f91dea85efac6ef73742956bc28e *tests/testthat/_snaps/reporter-summary.md fd518575b1281a4c597ffa0efa58ba0a *tests/testthat/_snaps/reporter-tap.md de6d26f3ff85236c68ce68dd37badc65 *tests/testthat/_snaps/reporter-teamcity.md 351043b35a5435f88421c52d5e00e593 *tests/testthat/_snaps/reporter-zzz.md cbe208856d70c9f36ebd7778ec14fe22 *tests/testthat/_snaps/reporter.md 77d8ebd744082b0bd6bd6ba6fc58f3bc *tests/testthat/_snaps/skip.md c52b1750ddf76116a360752fee817051 *tests/testthat/_snaps/snapshot-cleanup.md 1cd285939dab3c517343bc921c6c020b *tests/testthat/_snaps/snapshot-file.md 933222b19ff3e7ea5f65517ea1f7d57e *tests/testthat/_snaps/snapshot-file/a.txt 4e6fd5274ee5b52e824f6abbc5730385 *tests/testthat/_snaps/snapshot-file/foo-not-binary.csv 4e6fd5274ee5b52e824f6abbc5730385 *tests/testthat/_snaps/snapshot-file/foo.csv 241e8f7ec356602f89a684b4bbf36456 *tests/testthat/_snaps/snapshot-file/foo.png b7fdd99fac291c4bbf958d9aee731951 *tests/testthat/_snaps/snapshot-file/foo.r dd7194eb0b78cf0086ed31cc60e473a3 *tests/testthat/_snaps/snapshot-file/secret.txt 75c94db6d03a41b51a258369b0bc034f *tests/testthat/_snaps/snapshot-manage.md 865d25ae834cfc032e0d7a509758780c *tests/testthat/_snaps/snapshot-reporter.md 17e1729b960bc28102373253a407622c *tests/testthat/_snaps/snapshot-value.md 6cfcc53f7ae36f2fee5aef425b8ccf66 *tests/testthat/_snaps/snapshot.md 075f95fdc86789042f92c84ddfe73760 *tests/testthat/_snaps/source.md 981a4810931122fdb66bfe15a274fbf9 *tests/testthat/_snaps/test-compiled-code.md 715c29896e4d59546a1c6f1acc12d953 
*tests/testthat/_snaps/test-env.md 9d8f37417ff6867a65adb3b0d9447efd *tests/testthat/_snaps/test-files.md bf01ffb7d0f75681fb9c5849b3416ec7 *tests/testthat/_snaps/test-path.md 19bd088701ce41415ec6dc344a5081ed *tests/testthat/_snaps/test-state.md 1deb6208bc02c375f27de92d4499c425 *tests/testthat/_snaps/try-again.md 9fc0b81104626cb6008f9c02dbb6a433 *tests/testthat/_snaps/verify-output.md e371718f8ce69ca45ae1ec0cb3436348 *tests/testthat/context.R 3ae28194b2221a1faf5828e06bf95518 *tests/testthat/extract/simple.R 7340014f5a98c1db4a52789a689ee123 *tests/testthat/helper-assign.R cfde242811815c10019817cd919d7719 *tests/testthat/one.rds dcea1eb372bbcc1f85a556ec360bebbd *tests/testthat/reporters/backtraces.R b57f2dd170cc6d984479cb92587cae53 *tests/testthat/reporters/context.R 652090f643ef38fb1ed64cc2a37587b0 *tests/testthat/reporters/error-setup.R ccdc6f8bd60642b51e8a68e397189484 *tests/testthat/reporters/fail-many.R bf4b40e847d1e6ca4056ceda829eafd8 *tests/testthat/reporters/fail.R fc7290dc83a3ce9be1ccbcaf21fa953c *tests/testthat/reporters/long-test.R e585fd701a8d2ab8ac39d38605c2f82d *tests/testthat/reporters/nested.R e5da2b4d0dd836996ff542cf90e16dc9 *tests/testthat/reporters/skips.R 3ca346d133f7b171ac0ef155e8d7aaa9 *tests/testthat/reporters/state-change.R 6067370f05fc90a582ec2034b5d7ce45 *tests/testthat/reporters/successes.R 066953e4e45db935db891b938c99c574 *tests/testthat/reporters/tests.R cb65e68c4e96571239640fba1d557511 *tests/testthat/setup.R bf15d5dd3f1110e097d830715000eeed *tests/testthat/teardown.R 6b43c8a6b6a93727a1d9d1ffb87a416c *tests/testthat/test-bare.R 96dd20d07cfa5c9c6c2752fbd03a4763 *tests/testthat/test-capture-output.R 6bc6ae2cb5d2d68e9723b627c925f22f *tests/testthat/test-colour-text.R 6ce65b148dd361c5225467bfb51afdd6 *tests/testthat/test-compare.R 2079ca9fd0df80ac2bf4365d57f924e6 *tests/testthat/test-context.R b910a461a92e16199c62671a5c12e571 *tests/testthat/test-deprec-condition.R 3df357094e290dff65f73aa48bcb8f2c *tests/testthat/test-describe.R 
23aafbc5c55ad41a6bf68ae2ecd6d3eb *tests/testthat/test-edition.R ef67f70c07f7152e1e6c2ded78e0e530 *tests/testthat/test-error/test-error.R c80cb8812821151feb6e729448ebb1ae *tests/testthat/test-evaluate-promise.R 00f217c1a1daa67f0a1313825f5f7e7a *tests/testthat/test-examples.R 47ac332e922be762f990e9aacf8f9483 *tests/testthat/test-expect-all.R edf4af7633cd3a1fc754dda69475fad2 *tests/testthat/test-expect-comparison.R 415f662b0074d7d1d3911e9fc76ef436 *tests/testthat/test-expect-condition.R d0f141c3d5bf9f51f04e41865e933c86 *tests/testthat/test-expect-constant.R fb3931ced30635b9e83d96bc096a3cae *tests/testthat/test-expect-equality.R 15a3d3449ccfffa824739d25635e835f *tests/testthat/test-expect-inheritance.R 5c66ba42cb2932a5b8cffafe07ddac6e *tests/testthat/test-expect-invisible.R 3bbddf95b158a04c989cfb766d27d6d1 *tests/testthat/test-expect-known.R 668d3c8b65fac6f1be4e1c9db3ab05da *tests/testthat/test-expect-known.txt aa61ed5cfcc4f22ac0cee000389a7bb0 *tests/testthat/test-expect-match.R e7ce37966d7cc42002e84e91a5aa5050 *tests/testthat/test-expect-named.R 5b15cdc80bef7e9ad4c21d0e485ddc35 *tests/testthat/test-expect-no-condition.R ef94c968c77717de11b90728aef873a8 *tests/testthat/test-expect-output.R e3d194da47d01f147842ef767391b02b *tests/testthat/test-expect-reference.R 16958d84cf48714756e8b4a632f5eb9b *tests/testthat/test-expect-self-test.R 05023074e43f43e308c123e3b78b3851 *tests/testthat/test-expect-setequal.R 182ed0955f7d2d9dc1d95bb711f3ffee *tests/testthat/test-expect-shape.R 682273eadfbc6a2ceb2f38a815c545e8 *tests/testthat/test-expect-silent.R a2c86f8b83fafa64f874a47f8d9406ce *tests/testthat/test-expect-vector.R c33ea36ae14aeca7ad1da4a62c0b6f3f *tests/testthat/test-expectation.R e01941c04dfefb3814458ba6b1c02bbb *tests/testthat/test-extract.R dca9d21a10fa57c3ab3d08b240855623 *tests/testthat/test-helpers.R 1ef4f0238860c46c6486f32722c24171 *tests/testthat/test-list-reporter/test-bare-expectations.R 0c8496c069c63c0ef11ee19faa7ad845 
*tests/testthat/test-list-reporter/test-exception-outside-tests.R 3161831aded4882822106344d35eed7e *tests/testthat/test-list-reporter/test-exercise-list-reporter.R aaa1e95b5eaa73ec22fa705f9013a3e6 *tests/testthat/test-list-reporter/test-only-error.R 00afc66947be3c8c50835ef583385c5b *tests/testthat/test-local.R 7c94a539d5e208d9ec0fb7ec04f29b30 *tests/testthat/test-make-expectation.R ae69f57e8270cba41ab5d6605ba24d49 *tests/testthat/test-mock-oo.R 72a306f503d47d96eb326b3536eb6ccc *tests/testthat/test-mock.R 163eba8e6f6f73eb6ebccb5639f721cd *tests/testthat/test-mock2-helpers.R 57eec5806117c81e6b88a7fcf1e1820a *tests/testthat/test-mock2.R 97daeca68a374ecf966de8fa577a385a *tests/testthat/test-old-school.R 1ddb5f42f19161a45b45379e7e033f89 *tests/testthat/test-otel.R 8ab03b857119a7ee61d2fd02f1c1b49e *tests/testthat/test-parallel-crash.R 799076b73f67853f75c5f2c0ca540669 *tests/testthat/test-parallel-errors.R 1378b6090f1c88ba02e1ff22f881b298 *tests/testthat/test-parallel-outside.R 9490c91575aead392c6f3d9b0084c1dd *tests/testthat/test-parallel-setup.R f535c7464d15fe4e8a275b1baac7ebac *tests/testthat/test-parallel-startup.R 76805943c52aaeec22f7a0f8b5de72b6 *tests/testthat/test-parallel-stdout.R df6076facda9172e41fc4aafa67d1469 *tests/testthat/test-parallel-teardown.R fd752d5afc2de61eb35eca3eb72b473f *tests/testthat/test-parallel.R d1ee75fad079f010da5bfb604b84a52c *tests/testthat/test-parallel/crash/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/crash/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/crash/tests/testthat.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/crash/tests/testthat/test-crash-1.R 6eb2187975cc0ccd1b73b80b5cbab548 *tests/testthat/test-parallel/crash/tests/testthat/test-crash-2.R 1448ae861b503461676d891b65664a60 *tests/testthat/test-parallel/crash/tests/testthat/test-crash-3.R d1ee75fad079f010da5bfb604b84a52c *tests/testthat/test-parallel/fail/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d 
*tests/testthat/test-parallel/fail/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/fail/tests/testthat.R 3d9f111a1fc68c735ce3f33f0b4a98c2 *tests/testthat/test-parallel/fail/tests/testthat/test-bad.R d1ee75fad079f010da5bfb604b84a52c *tests/testthat/test-parallel/ok/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/ok/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/ok/tests/testthat.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/ok/tests/testthat/test-ok-1.R 6eb2187975cc0ccd1b73b80b5cbab548 *tests/testthat/test-parallel/ok/tests/testthat/test-ok-2.R b1871f1f6b5cfb6f63680e3d3c1aa0fa *tests/testthat/test-parallel/ok/tests/testthat/test-ok-3.R a33379cc3428a98003d46198e7074e32 *tests/testthat/test-parallel/outside/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/outside/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/outside/tests/testthat.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/outside/tests/testthat/test-outside-1.R 5dc0c3d924a44be0c073ce75ba8901fb *tests/testthat/test-parallel/outside/tests/testthat/test-outside-2.R b1871f1f6b5cfb6f63680e3d3c1aa0fa *tests/testthat/test-parallel/outside/tests/testthat/test-outside-3.R 9ec1263eb2ad029be653e7ed104c1a5d *tests/testthat/test-parallel/setup/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/setup/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/setup/tests/testthat.R 811c5e94170d2159f50691df3dcc3928 *tests/testthat/test-parallel/setup/tests/testthat/setup-bad.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/setup/tests/testthat/test-setup-1.R 6eb2187975cc0ccd1b73b80b5cbab548 *tests/testthat/test-parallel/setup/tests/testthat/test-setup-2.R b1871f1f6b5cfb6f63680e3d3c1aa0fa *tests/testthat/test-parallel/setup/tests/testthat/test-setup-3.R d1ee75fad079f010da5bfb604b84a52c 
*tests/testthat/test-parallel/snap/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/snap/NAMESPACE ffdc5b08f849b5fdaed5953813074d70 *tests/testthat/test-parallel/snap/tests/testthat/_snaps/snap-1.md c5e9d011aeaaa0c2498160b29e83a01b *tests/testthat/test-parallel/snap/tests/testthat/_snaps/snap-2.md c5e9d011aeaaa0c2498160b29e83a01b *tests/testthat/test-parallel/snap/tests/testthat/_snaps/snap-3.md eedc024378d603b08f4a66a7262d3603 *tests/testthat/test-parallel/snap/tests/testthat/test-snap-1.R fd34a5d0f44b05f4b58eec1f91ec4c92 *tests/testthat/test-parallel/snap/tests/testthat/test-snap-2.R a9f9122702c8c498d42260e4b06a59ce *tests/testthat/test-parallel/snap/tests/testthat/test-snap-3.R d1ee75fad079f010da5bfb604b84a52c *tests/testthat/test-parallel/startup/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/startup/NAMESPACE 2905ff9b235723ece3b60fe05f64040f *tests/testthat/test-parallel/startup/R/fail.R 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/startup/tests/testthat.R 8dc16b4aa90f99ff9c905437f885592b *tests/testthat/test-parallel/startup/tests/testthat/test-empty.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/startup/tests/testthat/test-startup-1.R 9ec1263eb2ad029be653e7ed104c1a5d *tests/testthat/test-parallel/stdout/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/stdout/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/stdout/tests/testthat.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/stdout/tests/testthat/test-stdout-1.R 609f8631dfd2960093dcafc57c20a783 *tests/testthat/test-parallel/stdout/tests/testthat/test-stdout-2.R 4bc2d66ba67b90ffa7a78ecf990ee69b *tests/testthat/test-parallel/stdout/tests/testthat/test-stdout-3.R 9ec1263eb2ad029be653e7ed104c1a5d *tests/testthat/test-parallel/syntax-error/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/syntax-error/NAMESPACE 
3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/syntax-error/tests/testthat.R 679d7c75a342a5f164861937f07e4eb8 *tests/testthat/test-parallel/syntax-error/tests/testthat/test-error-1.R 6eb2187975cc0ccd1b73b80b5cbab548 *tests/testthat/test-parallel/syntax-error/tests/testthat/test-error-2.R 71ef3908290c54ef643890998d61099b *tests/testthat/test-parallel/teardown/DESCRIPTION dc21c19f0d6968ee25d441b2cf46017d *tests/testthat/test-parallel/teardown/NAMESPACE 3d72cef70a441a8260ce53dfc26a04d1 *tests/testthat/test-parallel/teardown/tests/testthat.R 9376d26271173941e438393485268f18 *tests/testthat/test-parallel/teardown/tests/testthat/teardown-bad.R 8dc16b4aa90f99ff9c905437f885592b *tests/testthat/test-parallel/teardown/tests/testthat/test-empty.R 39813170f6986b5cd601116137d89168 *tests/testthat/test-parallel/teardown/tests/testthat/test-teardown-1.R d41d8cd98f00b204e9800998ecf8427e *tests/testthat/test-path-present/tests/testthat/empty c19afd37c26b1ab9550bd62be3bb6a04 *tests/testthat/test-quasi-label.R ed5f63208289841bcac7bbc06cca082d *tests/testthat/test-reporter-check.R 08a61f02dc428cee199db017bc5372d3 *tests/testthat/test-reporter-debug.R 7c8f86fc6d2d5c92ba9773c3f6621e67 *tests/testthat/test-reporter-junit.R bf589b9a581b3a7f97a2c1534f55a9de *tests/testthat/test-reporter-list.R 880454ed2efd07f94e48a60caa23c269 *tests/testthat/test-reporter-llm.R 3d307310e7394ec031092bf46ffca5f6 *tests/testthat/test-reporter-location.R 7884937e8a588fd8dbb0dc7470478be8 *tests/testthat/test-reporter-minimal.R e966ab94f3dfb4422545f93ad5272ed2 *tests/testthat/test-reporter-multi.R 36dd3f11b56a908c0da1f31154ae6dd5 *tests/testthat/test-reporter-progress.R b97f40768cbb38cdd8672c483342c041 *tests/testthat/test-reporter-rstudio.R 67f9cf6d77482b5e4f97bd6eac82a141 *tests/testthat/test-reporter-silent.R acfbce390ba6a98c7831548d606095b3 *tests/testthat/test-reporter-slow.R 22b81f9e4e7a614bcbc312252260945c *tests/testthat/test-reporter-stop.R fd802fa9af88eadfcafaa20ae13ef368 
*tests/testthat/test-reporter-summary.R 19081ae794f7447f93376eb59b4080de *tests/testthat/test-reporter-tap.R 7e7a59a45234dbf25710470819604770 *tests/testthat/test-reporter-teamcity.R 6a8d46c4f004577510eba084a3d09ef8 *tests/testthat/test-reporter-zzz.R 58a6537db741b3eeb5c8b0c356eadc9f *tests/testthat/test-reporter.R e7ab2649b1932d5abe4da9b9fae8d065 *tests/testthat/test-skip.R b37fd261db81d69032da921e1092da3c *tests/testthat/test-snapshot-cleanup.R 4cc0e6cdce17502b95fc89eb16bae733 *tests/testthat/test-snapshot-file-snaps.R 33686526a0a99129cba235f5f4887222 *tests/testthat/test-snapshot-file.R 57e340530365781a23ed032136f470a9 *tests/testthat/test-snapshot-manage.R d30af5c3a97827a4babb7b944ab81c27 *tests/testthat/test-snapshot-reporter.R 5f605a0c457492a4b75914cd81124526 *tests/testthat/test-snapshot-serialize.R 0287e9c596924b7735b75e1fa56a061b *tests/testthat/test-snapshot-value.R 0fa8405c26d0e0e27bf5a78379407863 *tests/testthat/test-snapshot.R 5d4b8e95ddde21af7c298512f1012c35 *tests/testthat/test-snapshot/_snaps/snapshot.md 797943912637f1bcd858ab419f74e152 *tests/testthat/test-snapshot/test-expect-condition.R 2bb5aca20c88188e301a72c26d6ef087 *tests/testthat/test-snapshot/test-snapshot.R 16583f55baa1a7bdb2051704bfefa363 *tests/testthat/test-source.R 44f3ab549feee6e5f7b84797c9007c45 *tests/testthat/test-srcrefs.R dc79665543883d8792f2899e8874047d *tests/testthat/test-teardown.R c8376dc1c47a51eb9c76f248406a0a03 *tests/testthat/test-teardown/test-teardown.R fd3faedfd2c5eb1842b521e6f4a8b451 *tests/testthat/test-test-compiled-code.R 725d8e1ef8e77eb5c3d10c8f2a9fd02f *tests/testthat/test-test-env.R e60300be58e6ba9157a4ecadc201a61f *tests/testthat/test-test-example.R e1db4d6a3ca5029e2d9dd578b2b36104 *tests/testthat/test-test-files.R 974e0f60717aa9ad80e54368d80baea0 *tests/testthat/test-test-path.R 5d4dcdfb9a923c5a7a10b97542e8cbb1 *tests/testthat/test-test-state.R 130d4937bd76c5342be0bbc3911ebc67 *tests/testthat/test-test-that.R 00e91e150a059625fe7cbedecdb2f0e3 
*tests/testthat/test-try-again.R d057f919f3b2a3290f5fffac03562c85 *tests/testthat/test-verify-conditions-cr.txt cee89667e467d2849be1bdbfe639813e *tests/testthat/test-verify-conditions-lines.txt 42bfccf3e2931066fc189d657d36fa27 *tests/testthat/test-verify-conditions.txt 116a95120141a2554ce41fd7ca35cc6b *tests/testthat/test-verify-constructed-calls.txt 2693471a9ec7948154d0c44afcc90266 *tests/testthat/test-verify-output.R 825064675efaad62e7b9e69653bd169c *tests/testthat/test-verify-output.txt e20a8def27befc0c9f779683b97c8d4a *tests/testthat/test-verify-unicode-false.txt 28606b7dd874612a7e8ed0c1e0b0ab4d *tests/testthat/test-verify-unicode-true.txt f5af1f7fab9ca0f74cabd7e5eb913cb7 *tests/testthat/test-warning/test-warning.R 6bf702da230c48e50f3bb5fdb376d5c5 *tests/testthat/test-watcher.R 345e417086aff30079462a156c180e16 *tests/testthat/testConfigLoadAll/DESCRIPTION ac9a51df3508649fb084324d6824388d *tests/testthat/testConfigLoadAll/NAMESPACE e5fada6c3bb75090e660927ca2976654 *tests/testthat/testConfigLoadAll/R/config.R 9f96a4a181af8dcebbf26045248a0f0e *tests/testthat/testConfigLoadAll/tests/testthat.R 6ac12b92ae9c275a25e3d91d69a9e3c9 *tests/testthat/testConfigLoadAll/tests/testthat/helper-config.R 01e8de16589bc2d597037c7abd1cb454 *tests/testthat/testConfigLoadAll/tests/testthat/test-config.R a6efbc07fba2e30ed4da6b6806873548 *tests/testthat/test_dir/helper_hello.R 78eae8b642ede48d5023c6e6abe463fc *tests/testthat/test_dir/test-bare-expectations.R ba645a6fc60ca2944390c7503cdffcfb *tests/testthat/test_dir/test-basic.R e9e88c0f432cda4fcd7b18029a976d08 *tests/testthat/test_dir/test-empty.R 6af43cbf46ca3b343269f62408f86b28 *tests/testthat/test_dir/test-errors.R 293aeab21289b01d552cfee428197607 *tests/testthat/test_dir/test-failures.R be7cdfaf36144fd3faf4b5ecd6590673 *tests/testthat/test_dir/test-helper.R d60e93fdd0bbe19790181b8b4cd75b16 *tests/testthat/test_dir/test-skip.R 703ea03463d04859caee9ec7be0ebfed *tests/testthat/too-many-failures.R 51e992e553c268305920c6522b0b284e 
*tests/testthat/utf8.R 9674460b911257f8a2de15ed1a900b14 *tests/testthat/width-80.txt 5882704a5ce3db4271f901a685641476 *vignettes/challenging-tests.Rmd f6262fb833b8d17aa87f711050a13dbc *vignettes/custom-expectation.Rmd cb2ab7500571c5da4127bc8736bf2862 *vignettes/mocking.Rmd c51bcdddbfee109ac065b1601e1fff43 *vignettes/parallel.Rmd d67a684934b84d11edf5e52ad8fa4fe2 *vignettes/review-image.png b8d2d88c269e3edce142c51baf7af114 *vignettes/review-text.png a4e543566b5aa47bc9fc4d4687e312ee *vignettes/skipping.Rmd 7dfb290b89829b01c1711297ab3fe785 *vignettes/snapshotting.Rmd 97def4ca015ee8145ff7af69249b5e67 *vignettes/special-files.Rmd 3926ff58fb28eb857060181fdc3656ad *vignettes/test-fixtures.Rmd 2251710e467ca3b0578655b64509b686 *vignettes/third-edition.Rmd testthat/R/0000755000176200001440000000000015127731656012330 5ustar liggesuserstestthat/R/deprec-condition.R0000644000176200001440000000713615077746007015710 0ustar liggesusersnew_capture <- function(class) { exiting_handlers <- rep_named(class, list(identity)) calling_handlers <- rep_named( class, alist(function(cnd) { if (can_entrace(cnd)) { cnd <- cnd_entrace(cnd) } return_from(env, cnd) }) ) formals <- pairlist2(code = , entrace = FALSE) # R CMD check global variable NOTE code <- entrace <- NULL body <- expr({ if (!entrace) { return(tryCatch( { code NULL }, !!!exiting_handlers )) } env <- environment() withCallingHandlers( { code NULL }, !!!calling_handlers ) }) new_function(formals, body, ns_env("testthat")) } #' Capture conditions, including messages, warnings, expectations, and errors. #' #' @description #' `r lifecycle::badge("superseded")` #' #' These functions allow you to capture the side-effects of a function call #' including printed output, messages and warnings. We no longer recommend #' that you use these functions, instead relying on the [expect_message()] #' and friends to bubble up unmatched conditions. If you just want to silence #' unimportant warnings, use [suppressWarnings()]. 
#' #' @param code Code to evaluate #' @param entrace Whether to add a [backtrace][rlang::trace_back] to #' the captured condition. #' @return Singular functions (`capture_condition`, `capture_expectation` etc) #' return a condition object. `capture_messages()` and `capture_warnings` #' return a character vector of message text. #' @keywords internal #' @export #' @examples #' f <- function() { #' message("First") #' warning("Second") #' message("Third") #' } #' #' capture_message(f()) #' capture_messages(f()) #' #' capture_warning(f()) #' capture_warnings(f()) #' #' # Condition will capture anything #' capture_condition(f()) capture_condition <- new_capture("condition") #' @export #' @rdname capture_condition capture_error <- new_capture("error") #' @export #' @rdname capture_condition capture_expectation <- new_capture("expectation") #' @export #' @rdname capture_condition capture_message <- new_capture("condition") #' @export #' @rdname capture_condition capture_warning <- new_capture("warning") #' @export #' @rdname capture_condition capture_messages <- function(code) { out <- Stack$new() withCallingHandlers( code, message = function(condition) { out$push(condition) tryInvokeRestart("muffleMessage") } ) get_messages(out$as_list()) } #' @export #' @rdname capture_condition capture_warnings <- function(code, ignore_deprecation = FALSE) { out <- Stack$new() withCallingHandlers( code, warning = function(condition) { if (ignore_deprecation && is_deprecation(condition)) { return() } out$push(condition) tryInvokeRestart("muffleWarning") } ) get_messages(out$as_list()) } get_messages <- function(x) { map_chr(x, cnd_message) } #' Is an error informative? #' #' @description #' `r lifecycle::badge("deprecated")` #' #' `is_informative_error()` is a generic predicate that indicates #' whether testthat users should explicitly test for an error #' class. Since we no longer recommend you do that, this generic #' has been deprecated. #' #' @param x An error object. 
#' @inheritParams rlang::args_dots_empty #' #' @details #' A few classes are hard-coded as uninformative: #' - `simpleError` #' - `rlang_error` unless a subclass is detected #' - `Rcpp::eval_error` #' - `Rcpp::exception` #' #' @keywords internal #' @export is_informative_error <- function(x, ...) { lifecycle::deprecate_stop("3.0.0", "is_informative_error()") } testthat/R/reporter-rstudio.R0000644000176200001440000000132115047715224015773 0ustar liggesusers#' Report results to RStudio #' #' This reporter is designed for output to RStudio. It produces results in #' any easily parsed form. #' #' @export #' @family reporters RStudioReporter <- R6::R6Class( "RStudioReporter", inherit = Reporter, public = list( initialize = function(...) { self$capabilities$parallel_support <- TRUE super$initialize(...) }, add_result = function(context, test, result) { if (expectation_success(result)) { return() } loc <- expectation_location(result) status <- expectation_type(result) first_line <- strsplit(result$message, "\n")[[1]][1] self$cat_line(loc, " [", status, "] ", test, ". ", first_line) } ) ) testthat/R/mock2.R0000644000176200001440000001741215072252215013457 0ustar liggesusers#' Temporarily redefine function definitions #' #' @description #' `with_mocked_bindings()` and `local_mocked_bindings()` provide tools for #' "mocking", temporarily redefining a function so that it behaves differently #' during tests. This is helpful for testing functions that depend on external #' state (i.e. reading a value from a file or a website, or pretending a package #' is or isn't installed). #' #' Learn more in `vignette("mocking")`. #' #' # Use #' #' There are four places that the function you are trying to mock might #' come from: #' #' * Internal to your package. #' * Imported from an external package via the `NAMESPACE`. #' * The base environment. #' * Called from an external package with `::`. #' #' They are described in turn below. 
#' #' (To mock S3 & S4 methods and R6 classes see [local_mocked_s3_method()], #' [local_mocked_s4_method()], and [local_mocked_r6_class()].) #' #' ## Internal & imported functions #' #' You mock internal and imported functions the same way. For example, take #' this code: #' #' ```R #' some_function <- function() { #' another_function() #' } #' ``` #' #' It doesn't matter whether `another_function()` is defined by your package #' or you've imported it from a dependency with `@import` or `@importFrom`, #' you mock it the same way: #' #' ```R #' local_mocked_bindings( #' another_function = function(...) "new_value" #' ) #' ``` #' #' ## Base functions #' #' To mock a function in the base package, you need to make sure that you #' have a binding for this function in your package. It's easiest to do this #' by binding the value to `NULL`. For example, if you wanted to mock #' `interactive()` in your package, you'd need to include this code somewhere #' in your package: #' #' ```R #' interactive <- NULL #' ``` #' #' Why is this necessary? `with_mocked_bindings()` and `local_mocked_bindings()` #' work by temporarily modifying the bindings within your package's namespace. #' When these tests are running inside of `R CMD check` the namespace is locked #' which means it's not possible to create new bindings so you need to make sure #' that the binding exists already. #' #' ## Namespaced calls #' #' It's trickier to mock functions in other packages that you call with `::`. #' For example, take this minor variation: #' #' ```R #' some_function <- function() { #' anotherpackage::another_function() #' } #' ``` #' #' To mock this function, you'd need to modify `another_function()` inside the #' `anotherpackage` package. You _can_ do this by supplying the `.package` #' argument to `local_mocked_bindings()` but we don't recommend it because #' it will affect all calls to `anotherpackage::another_function()`, not just #' the calls originating in your package. 
Instead, it's safer to either import #' the function into your package, or make a wrapper that you can mock: #' #' ```R #' some_function <- function() { #' my_wrapper() #' } #' my_wrapper <- function(...) { #' anotherpackage::another_function(...) #' } #' #' local_mocked_bindings( #' my_wrapper = function(...) "new_value" #' ) #' ``` #' #' ## Multiple return values / sequence of outputs #' #' To mock a function that returns different values in sequence, #' for instance an API call whose status would be 502 then 200, #' or an user input to `readline()`, you can use [mock_output_sequence()] #' #' ```R #' local_mocked_bindings(readline = mock_output_sequence("3", "This is a note", "n")) #' ``` #' #' @export #' @param ... Name-value pairs providing new values (typically functions) to #' temporarily replace the named bindings. #' @param code Code to execute with specified bindings. #' @param .env Environment that defines effect scope. For expert use only. #' @param .package The name of the package where mocked functions should be #' inserted. Generally, you should not supply this as it will be automatically #' detected when whole package tests are run or when there's one package #' under active development (i.e. loaded with [pkgload::load_all()]). #' We don't recommend using this to mock functions in other packages, #' as you should not modify namespaces that you don't own. #' @family mocking local_mocked_bindings <- function(..., .package = NULL, .env = caller_env()) { bindings <- list2(...) 
check_bindings(bindings) .package <- .package %||% dev_package() ns_env <- ns_env(.package) # Rebind in namespace, imports, and the global environment envs <- list(ns_env, env_parent(ns_env), globalenv()) bindings_found <- rep_named(names(bindings), FALSE) for (env in envs) { local_bindings_rebind(!!!bindings, .env = env, .frame = .env) bindings_found <- bindings_found | env_has(env, names(bindings)) } # And mock S3 methods methods_env <- ns_env[[".__S3MethodsTable__."]] local_bindings_rebind(!!!bindings, .env = methods_env, .frame = .env) # If needed, also mock in the package environment so we can call directly if (is_attached(paste0("package:", .package))) { local_bindings_rebind(!!!bindings, .env = pkg_env(.package), .frame = .env) } # And in the current testing environment test_env <- the$testing_env if (!is.null(test_env)) { local_bindings_rebind(!!!bindings, .env = test_env, .frame = .env) } if (!all(bindings_found)) { missing <- names(bindings)[!bindings_found] cli::cli_abort("Can't find binding for {.arg {missing}}") } invisible() } #' @rdname local_mocked_bindings #' @export with_mocked_bindings <- function(code, ..., .package = NULL) { local_mocked_bindings(..., .package = .package) code } local_testing_env <- function(env, frame = caller_env()) { local_bindings(testing_env = env, .env = the, .frame = frame) } # helpers ----------------------------------------------------------------- # Wrapper around local_bindings() that only rebinds existing values, # automatically unlocking as needed. We can only rebind because most of # these environments are locked, meaning we can't add new bindings. local_bindings_rebind <- function(..., .env = .frame, .frame = caller_env()) { bindings <- list2(...) 
bindings <- bindings[env_has(.env, names(bindings))] if (length(bindings) == 0) { return() } nms <- names(bindings) locked <- env_binding_unlock(.env, nms) withr::defer(env_binding_lock(.env, nms[locked]), envir = .frame) local_bindings(!!!bindings, .env = .env, .frame = .frame) invisible() } dev_package <- function(call = caller_env()) { if (is_testing() && testing_package() != "") { testing_package() } else { loaded <- loadedNamespaces() is_dev <- map_lgl(loaded, function(x) !is.null(pkgload::dev_meta(x))) if (sum(is_dev) == 0) { cli::cli_abort("No packages loaded with pkgload", call = call) } else if (sum(is_dev) == 1) { loaded[is_dev] } else { cli::cli_abort("Multiple packages loaded with pkgload", call = call) } } } check_bindings <- function(x, error_call = caller_env()) { if (!is_named(x)) { cli::cli_abort( "All elements of {.arg ...} must be named.", call = error_call ) } } # For testing ------------------------------------------------------------- test_mock_direct <- function() { "y" } test_mock_internal <- function() { test_mock_internal2() } test_mock_internal2 <- function() "y" test_mock_imports <- function() { as.character(sym("y")) } test_mock_namespaced <- function() { as.character(rlang::sym("y")) } test_mock_method <- function(x) { UseMethod("test_mock_method") } #' @export test_mock_method.integer <- function(x) { "y" } test_mock_base <- function() { interactive() } interactive <- NULL show_bindings <- function(name, env = caller_env()) { envs <- env_parents(env) has_binding <- Filter(function(env) env_has(env, name), envs) lapply(has_binding, env_desc) invisible() } test_mock_value <- 10 env_desc <- function(env) { cat(obj_address(env), ": ", env_name(env), "\n", sep = "") } testthat/R/test-path.R0000644000176200001440000000231115040747540014352 0ustar liggesusers#' Locate a file in the testing directory #' #' Many tests require some external file (e.g. 
a `.csv` if you're testing a #' data import function) but the working directory varies depending on the way #' that you're running the test (e.g. interactively, with `devtools::test()`, #' or with `R CMD check`). `test_path()` understands these variations and #' automatically generates a path relative to `tests/testthat`, regardless of #' where that directory might reside relative to the current working directory. #' #' @param ... Character vectors giving path components. #' @return A character vector giving the path. #' @export #' @examples #' \dontrun{ #' test_path("foo.csv") #' test_path("data", "foo.csv") #' } test_path <- function(...) { if (is_testing() && !isTRUE(getOption("testthat_interactive"))) { base <- NULL } else if (pkgload::is_loading()) { # Probably called from a helper file base <- NULL } else { base <- "tests/testthat" if (!dir.exists(base)) { cli::cli_abort("Can't find {.path {base}}.") } } file_path(base, ...) } file_path <- function(...) { paths <- compact(list2(...)) if (length(paths) == 0) { "." } else { do.call(file.path, paths) } } testthat/R/context.R0000644000176200001440000000275114164710002014122 0ustar liggesusers#' Describe the context of a set of tests. #' #' @description #' `r lifecycle::badge("superseded")` #' #' Use of `context()` is no longer recommended. Instead omit it, and messages #' will use the name of the file instead. This ensures that the context and #' test file name are always in sync. #' #' A context defines a set of tests that test related functionality. Usually #' you will have one context per file, but you may have multiple contexts #' in a single file if you so choose. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `context()` is deprecated in the third edition, and the equivalent #' information is instead recorded by the test file name. #' #' @param desc description of context. Should start with a capital letter. 
#' @keywords internal #' @export #' @examples #' context("String processing") #' context("Remote procedure calls") context <- function(desc) { edition_deprecate(3, "context()") context_start(desc) } context_start <- function(desc) { reporter <- get_reporter() if (!is.null(reporter)) { get_reporter()$.start_context(desc) } } #' Start test context from a file name #' #' For use in external reporters #' #' @param name file name #' @keywords internal #' @export context_start_file <- function(name) { context_start(context_name(name)) } context_name <- function(filename) { # Remove test- prefix filename <- sub("^test[-_]", "", filename) # Remove terminal extension filename <- sub("[.][Rr]$", "", filename) filename } testthat/R/reporter-location.R0000644000176200001440000000137115053661631016116 0ustar liggesusers#' Test reporter: location #' #' This reporter simply prints the location of every expectation and error. #' This is useful if you're trying to figure out the source of a segfault, #' or you want to figure out which code triggers a C/C++ breakpoint #' #' @export #' @family reporters LocationReporter <- R6::R6Class( "LocationReporter", inherit = Reporter, public = list( start_test = function(context, test) { self$cat_line("Start test: ", test) }, add_result = function(context, test, result) { status <- expectation_type(result) self$cat_line(" ", expectation_location(result), " [", status, "]") }, end_test = function(context, test) { self$cat_line("End test: ", test) self$cat_line() } ) ) testthat/R/source.R0000644000176200001440000001055415104635341013745 0ustar liggesusers#' Source a file, directory of files, or various important subsets #' #' These are used by [test_dir()] and friends #' #' @param path Path to files. #' @param pattern Regular expression used to filter files. #' @param env Environment in which to evaluate code. #' @param desc A character vector used to filter tests. 
This is used to #' (recursively) filter the content of the file, so that only the non-test #' code up to and including the matching test is run. #' @param chdir Change working directory to `dirname(path)`? #' @param wrap Automatically wrap all code within [test_that()]? This ensures #' that all expectations are reported, even if outside a test block. #' @param shuffle If `TRUE`, randomly reorder the top-level expressions #' in the file. #' @export #' @keywords internal source_file <- function( path, env = test_env(), chdir = TRUE, desc = NULL, wrap = TRUE, shuffle = FALSE, error_call = caller_env() ) { check_string(path, call = error_call) if (!file.exists(path)) { cli::cli_abort("{.arg path} does not exist.", call = error_call) } if (!is.environment(env)) { stop_input_type(env, "an environment", call = error_call) } check_character(desc, allow_null = TRUE) lines <- brio::read_lines(path) srcfile <- srcfilecopy( path, lines, file.info(path)[1, "mtime"], isFile = TRUE ) ## We need to parse from a connection, because parse() has a bug, ## and converts the input to the native encoding, if the text arg is used con <- textConnection(lines, encoding = "UTF-8") withr::defer(try(close(con), silent = TRUE)) exprs <- parse(con, n = -1, srcfile = srcfile, encoding = "UTF-8") if (shuffle) { exprs <- sample(exprs) } exprs <- filter_desc(exprs, desc, error_call = error_call) n <- length(exprs) if (n == 0L) { return(invisible()) } if (chdir) { old_dir <- setwd(dirname(path)) withr::defer(setwd(old_dir)) } withr::local_options(testthat_topenv = env, testthat_path = path) if (wrap) { invisible(test_code( code = exprs, env = env, reporter = get_reporter() %||% StopReporter$new() )) } else { withCallingHandlers( invisible(eval(exprs, env)), error = function(err) { cli::cli_abort( "Failed to evaluate {.path {path}}.", parent = err, call = error_call ) } ) } } filter_desc <- function(exprs, descs, error_call = caller_env()) { if (length(descs) == 0) { return(exprs) } desc <- descs[[1]] 
subtest_idx <- which(unname(map_lgl(exprs, is_subtest))) matching_idx <- keep(subtest_idx, \(idx) exprs[[idx]][[2]] == desc) if (length(matching_idx) == 0) { cli::cli_abort( "Failed to find test with description {.str {desc}}.", call = error_call ) } else if (length(matching_idx) > 1) { cli::cli_abort( "Found multiple tests with description {.str {desc}}.", call = error_call ) } # Want all code up to and including the matching test, except for subtests keep_idx <- setdiff(seq2(1, matching_idx), setdiff(subtest_idx, matching_idx)) # Recursively inspect the components of the subtest exprs[[matching_idx]][[3]] <- filter_desc( exprs[[matching_idx]][[3]], descs[-1], error_call = error_call ) exprs[keep_idx] } is_subtest <- function(expr) { is_call(expr, c("test_that", "describe", "it"), n = 2) && is_string(expr[[2]]) && is_call(expr[[3]], "{") } #' @rdname source_file #' @export source_dir <- function( path, pattern = "\\.[rR]$", env = test_env(), chdir = TRUE, wrap = TRUE, shuffle = FALSE ) { files <- sort(dir(path, pattern, full.names = TRUE)) error_call <- current_env() lapply(files, function(path) { source_file( path, env = env, chdir = chdir, wrap = wrap, shuffle = shuffle, error_call = error_call ) }) } #' @rdname source_file #' @export source_test_helpers <- function(path = "tests/testthat", env = test_env()) { source_dir(path, "^helper.*\\.[rR]$", env = env, wrap = FALSE) } #' @rdname source_file #' @export source_test_setup <- function(path = "tests/testthat", env = test_env()) { source_dir(path, "^setup.*\\.[rR]$", env = env, wrap = FALSE) } #' @rdname source_file #' @export source_test_teardown <- function(path = "tests/testthat", env = test_env()) { source_dir(path, "^teardown.*\\.[rR]$", env = env, wrap = FALSE) } testthat/R/import-standalone-types-check.R0000644000176200001440000002773315040747537020343 0ustar liggesusers# Standalone file: do not edit by hand # Source: # ---------------------------------------------------------------------- # # --- # repo: 
r-lib/rlang # file: standalone-types-check.R # last-updated: 2023-03-13 # license: https://unlicense.org # dependencies: standalone-obj-type.R # imports: rlang (>= 1.1.0) # --- # # ## Changelog # # 2023-03-13: # - Improved error messages of number checkers (@teunbrand) # - Added `allow_infinite` argument to `check_number_whole()` (@mgirlich). # - Added `check_data_frame()` (@mgirlich). # # 2023-03-07: # - Added dependency on rlang (>= 1.1.0). # # 2023-02-15: # - Added `check_logical()`. # # - `check_bool()`, `check_number_whole()`, and # `check_number_decimal()` are now implemented in C. # # - For efficiency, `check_number_whole()` and # `check_number_decimal()` now take a `NULL` default for `min` and # `max`. This makes it possible to bypass unnecessary type-checking # and comparisons in the default case of no bounds checks. # # 2022-10-07: # - `check_number_whole()` and `_decimal()` no longer treat # non-numeric types such as factors or dates as numbers. Numeric # types are detected with `is.numeric()`. # # 2022-10-04: # - Added `check_name()` that forbids the empty string. # `check_string()` allows the empty string by default. # # 2022-09-28: # - Removed `what` arguments. # - Added `allow_na` and `allow_null` arguments. # - Added `allow_decimal` and `allow_infinite` arguments. # - Improved errors with absent arguments. # # # 2022-09-16: # - Unprefixed usage of rlang functions with `rlang::` to # avoid onLoad issues when called from rlang (#1482). # # 2022-08-11: # - Added changelog. 
# # nocov start # Scalars ----------------------------------------------------------------- .standalone_types_check_dot_call <- .Call check_bool <- function(x, ..., allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x) && .standalone_types_check_dot_call(ffi_standalone_is_bool_1.0.7, x, allow_na, allow_null)) { return(invisible(NULL)) } stop_input_type( x, c("`TRUE`", "`FALSE`"), ..., allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } check_string <- function(x, ..., allow_empty = TRUE, allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { is_string <- .rlang_check_is_string( x, allow_empty = allow_empty, allow_na = allow_na, allow_null = allow_null ) if (is_string) { return(invisible(NULL)) } } stop_input_type( x, "a single string", ..., allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } .rlang_check_is_string <- function(x, allow_empty, allow_na, allow_null) { if (is_string(x)) { if (allow_empty || !is_string(x, "")) { return(TRUE) } } if (allow_null && rlang::is_null(x)) { return(TRUE) } if (allow_na && (identical(x, NA) || identical(x, na_chr))) { return(TRUE) } FALSE } check_name <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { is_string <- .rlang_check_is_string( x, allow_empty = FALSE, allow_na = FALSE, allow_null = allow_null ) if (is_string) { return(invisible(NULL)) } } stop_input_type( x, "a valid name", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } IS_NUMBER_true <- 0 IS_NUMBER_false <- 1 IS_NUMBER_oob <- 2 check_number_decimal <- function(x, ..., min = NULL, max = NULL, allow_infinite = TRUE, allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (missing(x)) { exit_code <- IS_NUMBER_false } else if (0 == (exit_code <- .standalone_types_check_dot_call( ffi_standalone_check_number_1.0.7, x, allow_decimal = 
TRUE, min, max, allow_infinite, allow_na, allow_null ))) { return(invisible(NULL)) } .stop_not_number( x, ..., exit_code = exit_code, allow_decimal = TRUE, min = min, max = max, allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } check_number_whole <- function(x, ..., min = NULL, max = NULL, allow_infinite = FALSE, allow_na = FALSE, allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (missing(x)) { exit_code <- IS_NUMBER_false } else if (0 == (exit_code <- .standalone_types_check_dot_call( ffi_standalone_check_number_1.0.7, x, allow_decimal = FALSE, min, max, allow_infinite, allow_na, allow_null ))) { return(invisible(NULL)) } .stop_not_number( x, ..., exit_code = exit_code, allow_decimal = FALSE, min = min, max = max, allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } .stop_not_number <- function(x, ..., exit_code, allow_decimal, min, max, allow_na, allow_null, arg, call) { if (allow_decimal) { what <- "a number" } else { what <- "a whole number" } if (exit_code == IS_NUMBER_oob) { min <- min %||% -Inf max <- max %||% Inf if (min > -Inf && max < Inf) { what <- sprintf("%s between %s and %s", what, min, max) } else if (x < min) { what <- sprintf("%s larger than or equal to %s", what, min) } else if (x > max) { what <- sprintf("%s smaller than or equal to %s", what, max) } else { abort("Unexpected state in OOB check", .internal = TRUE) } } stop_input_type( x, what, ..., allow_na = allow_na, allow_null = allow_null, arg = arg, call = call ) } check_symbol <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_symbol(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a symbol", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_arg <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_symbol(x)) { 
return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "an argument name", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_call <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_call(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a defused call", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_environment <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_environment(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "an environment", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_function <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_function(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a function", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_closure <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_closure(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "an R function", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_formula <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_formula(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a formula", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } # Vectors ----------------------------------------------------------------- 
check_character <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_character(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a character vector", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_logical <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is_logical(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a logical vector", ..., allow_na = FALSE, allow_null = allow_null, arg = arg, call = call ) } check_data_frame <- function(x, ..., allow_null = FALSE, arg = caller_arg(x), call = caller_env()) { if (!missing(x)) { if (is.data.frame(x)) { return(invisible(NULL)) } if (allow_null && rlang::is_null(x)) { return(invisible(NULL)) } } stop_input_type( x, "a data frame", ..., allow_null = allow_null, arg = arg, call = call ) } # nocov end testthat/R/stack.R0000644000176200001440000000262715047715224013561 0ustar liggesusers# Source: https://github.com/rstudio/shiny/blob/master/R/stack.R # License: GPL-3 # Relicensed a MIT with permission. # A Stack object backed by a list. The backing list will grow or shrink as # the stack changes in size. Stack <- R6Class( "Stack", class = FALSE, public = list( initialize = function(init = 20L) { # init is the initial size of the list. It is also used as the minimum # size of the list as it shrinks. 
private$stack <- vector("list", init) private$init <- init private$count <- 0L }, push = function(..., .list = NULL) { args <- c(list(...), .list) new_size <- private$count + length(args) # Grow if needed; double in size while (new_size > length(private$stack)) { private$stack[length(private$stack) * 2L] <- list(NULL) } private$stack[private$count + seq_along(args)] <- args private$count <- new_size invisible(self) }, size = function() { private$count }, # Return the entire stack as a list, where the first item in the list is the # oldest item in the stack, and the last item is the most recently added. as_list = function() { private$stack[seq_len(private$count)] } ), private = list( stack = NULL, # A list that holds the items count = 0L, # Current number of items in the stack init = 20L # Initial and minimum size of the stack ) ) testthat/R/mock-oo.R0000644000176200001440000001157215127561732014021 0ustar liggesusers#' Mock S3 and S4 methods #' #' @description #' These functions temporarily override S3 or S4 methods. They can mock #' methods that don't already exist, or temporarily remove a method by setting #' `definition = NULL`. #' #' Learn more about mocking in `vignette("mocking")`. #' #' @param generic A string giving the name of the generic. #' @param signature A character vector giving the signature of the method. #' @param definition A function providing the method definition, or `NULL` to #' temporarily remove the method. #' @param frame Calling frame which determines the scope of the mock. #' Only needed when wrapping in another local helper. 
#' @export #' @examples #' x <- as.POSIXlt(Sys.time()) #' local({ #' local_mocked_s3_method("length", "POSIXlt", function(x) 42) #' length(x) #' }) #' #' length(x) local_mocked_s3_method <- function( generic, signature, definition, frame = caller_env() ) { check_string(generic) check_string(signature) check_function(definition, allow_null = TRUE) old <- utils::getS3method(generic, signature, optional = TRUE) # Set the new method, or a pass-through stub if removing definition <- definition %||% function(...) NextMethod() registerS3method(generic, signature, definition, envir = frame) # On cleanup, restore old method or remove the one we added if (is.null(old)) { withr::defer(remove_s3_method(generic, signature, envir = frame), frame) } else { withr::defer( registerS3method(generic, signature, old, envir = frame), frame ) } invisible() } remove_s3_method <- function(generic, class, envir) { # Extracted from registerS3method() group_generics <- c("Math", "Ops", "matrixOps", "Summary", "Complex") if (generic %in% group_generics) { s3_envir <- .BaseNamespaceEnv } else { genfun <- get(generic, envir = envir) s3_envir <- environment(genfun) %||% .BaseNamespaceEnv } if (env_has(s3_envir, ".__S3MethodsTable__.")) { table <- env_get(s3_envir, ".__S3MethodsTable__.") env_unbind(table, paste0(generic, ".", class)) } } #' @rdname local_mocked_s3_method #' @export local_mocked_s4_method <- function( generic, signature, definition, frame = caller_env() ) { check_string(generic) check_character(signature) check_function(definition, allow_null = TRUE) generic_def <- methods::getGeneric(generic) if (is.null(generic_def)) { cli::cli_abort("Can't find generic {.fn {generic}}.") } set_method <- function(generic, signature, def) { env <- topenv(frame) old <- methods::getMethod(generic, signature, optional = TRUE) if (is.null(def)) { methods::removeMethod(generic, signature, env) } else { suppressMessages(methods::setMethod(generic, signature, def, env)) } old } old <- 
set_method(generic_def, signature, definition) withr::defer(set_method(generic_def, signature, old), frame) invisible() } #' Mock an R6 class #' #' @description #' This function allows you to temporarily override an R6 class definition. #' It works by creating a subclass then using [local_mocked_bindings()] to #' temporarily replace the original definition. This means that it will not #' affect subclasses of the original class; please file an issue if you need #' this. #' #' Learn more about mocking in `vignette("mocking")`. #' #' @export #' @param class An R6 class definition. #' @param public,private A named list of public and private methods/data. #' @inheritParams local_mocked_s3_method local_mocked_r6_class <- function( class, public = list(), private = list(), frame = caller_env() ) { if (!inherits(class, "R6ClassGenerator")) { stop_input_type(class, "an R6 class definition") } if (!is.list(public)) { stop_input_type(public, "a list") } if (!is.list(private)) { stop_input_type(private, "a list") } mocked_class <- mock_r6_class(class, public, private) local_mocked_bindings("{class$classname}" := mocked_class, .env = frame) } mock_r6_class <- function(class, public = list(), private = list()) { R6::R6Class( paste0("Mocked", class$classname), inherit = class, private = private, public = public ) } # For testing ------------------------------------------------------------------ TestMockClass <- R6::R6Class( "TestMockClass", public = list( sum = function() { self$public_fun() + self$public_val + private$private_fun() + private$private_val }, public_fun = function() 1, public_val = 20 ), private = list( private_fun = function() 300, private_val = 4000 ) ) # Silence R CMD check NOTE: I think it's because TestMockPerson is a claas # constructor #' @importFrom methods new TestMockPerson <- methods::setClass( "TestMockPerson", slots = c(name = "character", age = "numeric") ) methods::setGeneric("mock_age", function(x) standardGeneric("mock_age")) 
methods::setMethod("mock_age", "TestMockPerson", function(x) x@age) testthat/R/expect-reference.R0000644000176200001440000000205315072252215015663 0ustar liggesusers#' Do you expect a reference to this object? #' #' `expect_reference()` compares the underlying memory addresses of #' two symbols. It is for expert use only. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `expect_reference()` is deprecated in the third edition. If you know what #' you're doing, and you really need this behaviour, just use `is_reference()` #' directly: `expect_true(rlang::is_reference(x, y))`. #' #' @inheritParams expect_equal #' @family expectations #' @keywords internal #' @export expect_reference <- function( object, expected, info = NULL, label = NULL, expected.label = NULL ) { edition_deprecate(3, "expect_reference()") act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) if (!is_reference(act$val, exp$val)) { msg <- sprintf("Expected %s to be a reference to %s.", act$lab, exp$lab) fail(msg, info = info) } else { pass() } invisible(act$val) } # expect_reference() needs dev version of rlang utils::globalVariables("is_reference") testthat/R/describe.R0000644000176200001440000000472615047715224014236 0ustar liggesusers#' describe: a BDD testing language #' #' A simple [behavior-driven development #' (BDD)](https://en.wikipedia.org/wiki/Behavior-driven_development) #' [domain-specific language](https://en.wikipedia.org/wiki/Domain-specific_language) #' for writing tests. The language is similar to [RSpec](https://rspec.info/) #' for Ruby or [Mocha](https://mochajs.org/) for JavaScript. BDD tests read #' like sentences and it should thus be easier to understand what the #' specification of a function/component is. #' #' Tests using the `describe` syntax not only verify the tested code, but #' also document its intended behaviour. 
Each `describe` block specifies a #' larger component or function and contains a set of specifications. A #' specification is defined by an `it` block. Each `it` block #' functions as a test and is evaluated in its own environment. You #' can also have nested `describe` blocks. #' #' This test syntax helps to test the intended behaviour of your code. For #' example: you want to write a new function for your package. Try to describe #' the specification first using `describe`, before your write any code. #' After that, you start to implement the tests for each specification (i.e. #' the `it` block). #' #' Use `describe` to verify that you implement the right things and use #' [test_that()] to ensure you do the things right. #' #' @param description description of the feature #' @param code test code containing the specs #' @keywords internal #' @export #' @examples #' describe("matrix()", { #' it("can be multiplied by a scalar", { #' m1 <- matrix(1:4, 2, 2) #' m2 <- m1 * 2 #' expect_equal(matrix(1:4 * 2, 2, 2), m2) #' }) #' it("can have not yet tested specs") #' }) #' #' # Nested specs: #' ## code #' addition <- function(a, b) a + b #' division <- function(a, b) a / b #' #' ## specs #' describe("math library", { #' describe("addition()", { #' it("can add two numbers", { #' expect_equal(1 + 1, addition(1, 1)) #' }) #' }) #' describe("division()", { #' it("can divide two numbers", { #' expect_equal(10 / 2, division(10, 2)) #' }) #' it("can handle division by 0") #not yet implemented #' }) #' }) describe <- function(description, code) { local_description_push(description) code <- substitute(code) test_code(code, parent.frame()) } #' @export #' @rdname describe it <- function(description, code = NULL) { local_description_push(description) code <- substitute(code) test_code(code, parent.frame()) } testthat/R/mock2-helpers.R0000644000176200001440000000274415047715224015127 0ustar liggesusers#' Mock a sequence of output from a function #' #' Specify multiple return values 
for mocking #' #' @param ... <[`dynamic-dots`][rlang::dyn-dots]> Values to return in sequence. #' @param recycle whether to recycle. If `TRUE`, once all values have been returned, #' they will be returned again in sequence. #' #' @return A function that you can use within `local_mocked_bindings()` and #' `with_mocked_bindings()` #' @export #' #' @examples #' # inside local_mocked_bindings() #' \dontrun{ #' local_mocked_bindings(readline = mock_output_sequence("3", "This is a note", "n")) #' } #' # for understanding #' mocked_sequence <- mock_output_sequence("3", "This is a note", "n") #' mocked_sequence() #' mocked_sequence() #' mocked_sequence() #' try(mocked_sequence()) #' recycled_mocked_sequence <- mock_output_sequence( #' "3", "This is a note", "n", #' recycle = TRUE #' ) #' recycled_mocked_sequence() #' recycled_mocked_sequence() #' recycled_mocked_sequence() #' recycled_mocked_sequence() #' @family mocking mock_output_sequence <- function(..., recycle = FALSE) { values <- rlang::list2(...) i <- 1 function(...) { if (i > length(values) && !recycle) { cli::cli_abort(c( "Can't find value for {i}th iteration.", i = "{.arg ...} has only {length(values)} values.", i = "You can set {.arg recycle} to {.code TRUE}." )) } index <- (i - 1) %% length(values) + 1 value <- rep_len(values, length.out = index)[[index]] i <<- i + 1 value } } testthat/R/reporter-minimal.R0000644000176200001440000000137315047715224015737 0ustar liggesusers#' Report minimal results as compactly as possible #' #' The minimal test reporter provides the absolutely minimum amount of #' information: whether each expectation has succeeded, failed or experienced #' an error. If you want to find out what the failures and errors actually #' were, you'll need to run a more informative test reporter. #' #' @export #' @family reporters MinimalReporter <- R6::R6Class( "MinimalReporter", inherit = Reporter, public = list( initialize = function(...) { super$initialize(...) 
self$capabilities$parallel_support <- TRUE }, add_result = function(context, test, result) { self$cat_tight(single_letter_summary(result)) }, end_reporter = function() { self$cat_line() } ) ) testthat/R/snapshot-cleanup.R0000644000176200001440000000373515047715224015741 0ustar liggesuserssnapshot_cleanup <- function( path, test_files_seen = character(), snap_files_seen = character() ) { outdated <- snapshot_outdated(path, test_files_seen, snap_files_seen) if (length(outdated) > 0) { cli::cli_inform("Deleting unused snapshots: {.path {outdated}}") unlink(file.path(path, outdated), recursive = TRUE) } # Delete empty directories: # nest dir() inside list.dirs() to avoid picking up `.` directories dirs <- list.dirs(dir(path, full.names = TRUE)) empty <- dirs[map_lgl(dirs, is_dir_empty)] unlink(empty, recursive = TRUE) # Delete snapshot folder if (is_dir_empty(path)) { unlink(path, recursive = TRUE) } rstudio_tickle() invisible(outdated) } is_dir_empty <- function(x) { length(dir(x, recursive = TRUE)) == 0 } snapshot_outdated <- function( path, test_files_seen = character(), snap_files_seen = character() ) { all_files <- dir(path, recursive = TRUE) expected <- snapshot_expected(path, test_files_seen, snap_files_seen) setdiff(all_files, expected) } snapshot_expected <- function( snap_dir, test_files_seen = character(), snap_files_seen = character() ) { if (length(test_files_seen) > 0) { snaps <- c( paste0(test_files_seen, ".md"), paste0(test_files_seen, ".new.md") ) } else { snaps <- character() } # Empirically determine variants snap_dirs <- list.dirs(snap_dir, recursive = FALSE) is_variant <- dir_contains(snap_dirs, c(snaps, snap_files_seen)) variants <- basename(snap_dirs[is_variant]) snap_files_seen_new <- paste0( tools::file_path_sans_ext(snap_files_seen), ".new.", tools::file_ext(snap_files_seen) ) sort(c( snaps, outer(variants, snaps, file.path), snap_files_seen, outer(variants, snap_files_seen, file.path), snap_files_seen_new, outer(variants, snap_files_seen_new, 
file.path) )) } dir_contains <- function(paths, expected_files) { map_lgl(paths, \(path) any(file.exists(file.path(path, expected_files)))) } testthat/R/parallel-taskq.R0000644000176200001440000001771115104404205015355 0ustar liggesusers# See https://www.tidyverse.org/blog/2019/09/callr-task-q/ # for a detailed explanation on how the task queue works. # # Changes in this version, compared to the blog post: # * We use data frames instead of tibbles. This requires some caution # and the df_add_row() function below. # * We do not collect the results in a result column, because we # just return them immediately, as we get them. # * We do not need a pop() method, because poll() will just return # every message. PROCESS_DONE <- 200L PROCESS_STARTED <- 201L PROCESS_MSG <- 301L PROCESS_OUTPUT <- 302L PROCESS_EXITED <- 500L PROCESS_CRASHED <- 501L PROCESS_CLOSED <- 502L PROCESS_FAILURES <- c(PROCESS_EXITED, PROCESS_CRASHED, PROCESS_CLOSED) task_q <- R6::R6Class( "task_q", public = list( initialize = function(concurrency = 4L, ...) { private$start_workers(concurrency, ...) 
invisible(self) }, list_tasks = function() private$tasks, get_num_waiting = function() { sum(!private$tasks$idle & private$tasks$state == "waiting") }, get_num_running = function() { sum(!private$tasks$idle & private$tasks$state == "running") }, get_num_done = function() sum(private$tasks$state == "done"), is_idle = function() sum(!private$tasks$idle) == 0, push = function(fun, args = list(), id = NULL) { if (is.null(id)) { id <- private$get_next_id() } if (id %in% private$tasks$id) { cli::cli_abort("Duplicate task id.") } before <- which(private$tasks$idle)[1] private$tasks <- df_add_row( private$tasks, .before = before, id = id, idle = FALSE, state = "waiting", fun = I(list(fun)), args = I(list(args)), worker = I(list(NULL)), path = args[[1]], startup = I(list(NULL)) ) private$schedule() invisible(id) }, poll = function(timeout = 0) { limit <- Sys.time() + timeout as_ms <- function(x) { if (x == Inf) -1 else as.integer(as.double(x, "secs") * 1000) } repeat { pr <- vector(mode = "list", nrow(private$tasks)) topoll <- which(private$tasks$state == "running") pr[topoll] <- processx::poll( private$tasks$worker[topoll], as_ms(timeout) ) results <- lapply(seq_along(pr), function(i) { # nothing from this worker? if (is.null(pr[[i]]) || all(pr[[i]] != "ready")) { return() } # there is a testthat message? worker <- private$tasks$worker[[i]] msg <- if (pr[[i]][["process"]] == "ready") { worker$read() } # there is an output message? has_output <- pr[[i]][["output"]] == "ready" || pr[[i]][["error"]] == "ready" outmsg <- NULL if (has_output) { lns <- c( safely(worker$read_output_lines(), character()), safely(worker$read_error_lines(), character()) ) inc <- paste0( safely(worker$read_output(), ""), safely(worker$read_error(), "") ) if (nchar(inc)) { lns <- c(lns, strsplit(inc, "\n", fixed = TRUE)[[1]]) } # startup message? 
if (is.na(private$tasks$path[i])) { private$tasks$startup[[i]] <- c(private$tasks$startup[[i]], lns) } else { outmsg <- structure( list( code = PROCESS_OUTPUT, message = lns, path = private$tasks$path[i] ), class = "testthat_message" ) } } ## TODO: why can this be NULL? if (is.null(msg) || msg$code == PROCESS_MSG) { private$tasks$state[[i]] <- "running" } else if (msg$code == PROCESS_STARTED) { private$tasks$state[[i]] <- "ready" msg <- NULL } else if (msg$code == PROCESS_DONE) { if (!is.null(msg$error)) { private$handle_error(msg, i) } private$tasks$state[[i]] <- "ready" } else if (msg$code %in% PROCESS_FAILURES) { private$handle_error(msg, i) } else { file <- private$tasks$args[[i]][[1]] cli::cli_abort( c( "Unknown message from testthat subprocess: {msg$code}.", "i" = "In file {.file {file}}." ), test_file = file, class = c("testthat_process_error", "testthat_error") ) } compact(list(msg, outmsg)) }) # single list for all workers results <- compact(unlist(results, recursive = FALSE)) private$schedule() if (is.finite(timeout)) { timeout <- limit - Sys.time() } if (length(results) || timeout < 0) break } results } ), private = list( tasks = NULL, next_id = 1L, get_next_id = function() { id <- private$next_id private$next_id <- id + 1L paste0(".", id) }, start_workers = function(concurrency, ...) { nl <- I(replicate(concurrency, NULL)) private$tasks <- data.frame( stringsAsFactors = FALSE, id = paste0(".idle-", seq_len(concurrency)), idle = TRUE, state = "running", fun = nl, args = nl, worker = nl, path = NA_character_, startup = nl ) rsopts <- callr::r_session_options(stdout = "|", stderr = "|", ...) 
for (i in seq_len(concurrency)) { rs <- callr::r_session$new(rsopts, wait = FALSE) private$tasks$worker[[i]] <- rs } }, schedule = function() { ready <- which(private$tasks$state == "ready") if (!length(ready)) { return() } rss <- private$tasks$worker[ready] private$tasks$worker[ready] <- replicate(length(ready), NULL) private$tasks$state[ready] <- ifelse(private$tasks$idle[ready], "waiting", "done") done <- which(private$tasks$state == "done") if (any(done)) { private$tasks <- private$tasks[-done, ] } waiting <- which(private$tasks$state == "waiting")[1:length(ready)] private$tasks$worker[waiting] <- rss private$tasks$state[waiting] <- ifelse(private$tasks$idle[waiting], "ready", "running") lapply(waiting, function(i) { if (!private$tasks$idle[i]) { private$tasks$worker[[i]]$call( private$tasks$fun[[i]], private$tasks$args[[i]] ) } }) }, handle_error = function(msg, task_no) { cat("\n") # get out of the progress bar, if any fun <- private$tasks$fun[[task_no]] file <- private$tasks$args[[task_no]][[1]] if (is.null(fun)) { msg$error$stdout <- msg$stdout msg$error$stderr <- paste( c(private$tasks$startup[[task_no]], msg$stderr), collapse = "\n" ) cli::cli_abort( c( "testthat subprocess failed to start.", " " = "{no_wrap(msg$error$stderr)}" ), test_file = NULL, class = c("testthat_process_error", "testthat_error"), call = NULL ) } else { cli::cli_abort( "testthat subprocess exited in file {.file {file}}.", test_file = file, parent = msg$error, class = c("testthat_process_error", "testthat_error"), call = NULL ) } } ) ) df_add_row <- function(df, ..., .before = NULL) { before <- .before %||% (nrow(df) + 1L) row <- data.frame(stringsAsFactors = FALSE, ...) 
if (before > nrow(df)) { rbind(df, row) } else if (before <= 1L) { rbind(row, df) } else { rbind(df[1:(before - 1), ], row, df[before:nrow(df), ]) } } safely <- function(expr, default = NULL) { tryCatch( expr, error = function(e) default ) } silence_r_cmd_check <- function() callr::r_session testthat/R/test-example.R0000644000176200001440000000363115047715224015060 0ustar liggesusers#' Test package examples #' #' These helper functions make it easier to test the examples in a package. #' Each example counts as one test, and it succeeds if the code runs without #' an error. Generally, this is redundant with R CMD check, and is not #' recommended in routine practice. #' #' @keywords internal #' @param path For `test_examples()`, path to directory containing Rd files. #' For `test_example()`, path to a single Rd file. Remember the working #' directory for tests is `tests/testthat`. #' @param title Test title to use #' @param rd A parsed Rd object, obtained from [tools::Rd_db()] or otherwise. 
#' @export test_examples <- function(path = "../..") { res <- test_examples_source(path) %||% test_examples_installed() if (is.null(res)) { cli::cli_abort("Could not find examples.") } invisible(res) } test_examples_source <- function(path = "../..") { if (!dir.exists(file.path(path, "man"))) { return() } Rd <- tools::Rd_db(dir = path) if (length(Rd) == 0) { return() } lapply(Rd, test_rd) } test_examples_installed <- function(package = testing_package()) { if (identical(package, "") || is.null(package)) { return() } Rd <- tools::Rd_db(package = package) if (length(Rd) == 0) { return() } lapply(Rd, test_rd) } #' @export #' @rdname test_examples test_rd <- function(rd, title = attr(rd, "Rdfile")) { test_example(rd, title %||% "example") } #' @export #' @rdname test_examples test_example <- function(path, title = path) { local_description_push(title) ex_path <- withr::local_tempfile(pattern = "test_example-", fileext = ".R") tools::Rd2ex(path, ex_path) if (!file.exists(ex_path)) { return(invisible(FALSE)) } ok <- test_code( code = parse(ex_path, encoding = "UTF-8"), env = globalenv(), reporter = get_reporter() %||% StopReporter$new(), skip_on_empty = FALSE ) if (ok) { succeed(path) } invisible(ok) } testthat/R/expect-invisible.R0000644000176200001440000000260415072252215015713 0ustar liggesusers#' Do you expect the result to be (in)visible? #' #' Use this to test whether a function returns a visible or invisible #' output. Typically you'll use this to check that functions called primarily #' for their side-effects return their data argument invisibly. #' #' @param call A function call. #' @inheritParams expect_that #' @return The evaluated `call`, invisibly. #' @export #' @examples #' expect_invisible(x <- 10) #' expect_visible(x) #' #' # Typically you'll assign the result of the expectation so you can #' # also check that the value is as you expect. 
#' greet <- function(name) {
#'   message("Hi ", name)
#'   invisible(name)
#' }
#' out <- expect_invisible(greet("Hadley"))
#' expect_equal(out, "Hadley")
expect_invisible <- function(call, label = NULL) {
  lab <- label %||% expr_label(enexpr(call))

  # withVisible() evaluates the promise and records its visibility.
  vis <- withVisible(call)
  if (!identical(vis$visible, FALSE)) {
    fail(c(
      sprintf("Expected %s to return invisibly.", lab),
      "Actual visibility: visible."
    ))
  } else {
    pass()
  }
  invisible(vis$value)
}

#' @export
#' @rdname expect_invisible
expect_visible <- function(call, label = NULL) {
  lab <- label %||% expr_label(enexpr(call))

  vis <- withVisible(call)
  if (!identical(vis$visible, TRUE)) {
    fail(c(
      sprintf("Expected %s to return visibly.", lab),
      "Actual visibility: invisible."
    ))
  } else {
    pass()
  }
  invisible(vis$value)
}

# ==== testthat/R/snapshot-serialize.R ====

# Serialise snapshot data (a named list of character vectors) to markdown:
# one h2 per test, cases separated by "---" rules, bodies indented 4 spaces.
snap_to_md <- function(data) {
  h2 <- paste0("# ", names(data), "\n\n")
  code_block <- function(x) paste0(indent_add(x), collapse = "\n\n---\n\n")
  data <- map_chr(data, code_block)
  paste0(h2, data, "\n\n", collapse = "")
}

# Inverse of snap_to_md(): parse markdown lines back into a named list of
# character vectors, one element per test, one string per case.
snap_from_md <- function(lines) {
  # Normalise Windows line endings.
  lines <- gsub("\r", "", lines, fixed = TRUE)

  h2 <- grepl("^# ", lines)
  tests_group <- cumsum(h2)
  tests <- split(lines[!h2], tests_group[!h2])
  names(tests) <- gsub("^# ", "", lines[h2])

  split_tests <- function(lines) {
    sep <- grepl("^-{3,}", lines)
    case_group <- cumsum(sep)

    # Remove first line, separator, line above and line below
    # Only remove last line if it's empty (to handle missing final newlines)
    sep_loc <- which(sep)
    drop <- c(1, sep_loc, sep_loc + 1, sep_loc - 1)
    if (length(lines) > 0 && lines[length(lines)] == "") {
      drop <- c(drop, length(lines))
    }
    cases <- unname(split(lines[-drop], case_group[-drop]))

    code_unblock <- function(x) paste0(indent_del(x), collapse = "\n")
    map_chr(cases, code_unblock)
  }
  lapply(tests, split_tests)
}

# Read a snapshot file, returning an empty list when it doesn't exist.
read_snaps <- function(path) {
  if (file.exists(path)) {
    lines <- brio::read_lines(path)
    snap_from_md(lines)
  } else {
    list()
  }
}

# Write snapshots to `path`; when all snapshots are gone, optionally delete
# the file so stale snapshot files don't linger.
write_snaps <- function(snaps, path, delete = FALSE) {
  snaps <- compact(snaps)
  if (length(snaps) == 0) {
    if (delete) {
      unlink(path)
    }
    return()
  }

  out <- snap_to_md(snaps)
  brio::write_file(out, path)
}

# Helpers -----------------------------------------------------------------

# Prefix every line of each string with `prefix` (4 spaces = md code block).
indent_add <- function(x, prefix = "    ") {
  paste0(prefix, gsub("\n", paste0("\n", prefix), x, fixed = TRUE))
}

# Remove the per-line indent added by indent_add().
indent_del <- function(x, prefix = "    ") {
  # `sub()` suffices for an anchored pattern: "^" can match at most once.
  x <- sub(paste0("^", prefix), "", x)
  x <- gsub(paste0("\n", prefix), "\n", x)
  x
}

# ==== testthat/R/expect-no-condition.R ====

#' Do you expect the absence of errors, warnings, messages, or other conditions?
#'
#' @description
#' These expectations are the opposite of [expect_error()],
#' `expect_warning()`, `expect_message()`, and `expect_condition()`. They
#' assert the absence of an error, warning, or message, respectively.
#'
#' @inheritParams expect_error
#' @param message,class The default, `message = NULL, class = NULL`,
#'   will fail if there is any error/warning/message/condition.
#'
#'   In many cases, particularly when testing warnings and messages, you will
#'   want to be more specific about the condition you are hoping **not** to see,
#'   i.e. the condition that motivated you to write the test. Similar to
#'   `expect_error()` and friends, you can specify the `message` (a regular
#'   expression that the message of the condition must match) and/or the
#'   `class` (a class the condition must inherit from). This ensures that
#'   the message/warnings you don't want never recur, while allowing new
#'   messages/warnings to bubble up for you to deal with.
#'
#'   Note that you should only use `message` with errors/warnings/messages
#'   that you generate, or that base R generates (which tend to be stable).
#'   Avoid tests that rely on the specific text generated by another package
#'   since this can easily change.
If you do need to test text generated by
#'   another package, either protect the test with `skip_on_cran()` or
#'   use `expect_snapshot()`.
#' @inheritParams rlang::args_dots_empty
#' @export
#' @examples
#' expect_no_warning(1 + 1)
#'
#' foo <- function(x) {
#'   warning("This is a problem!")
#' }
#'
#' # warning doesn't match so bubbles up:
#' expect_no_warning(foo(), message = "bananas")
#'
#' # warning does match so causes a failure:
#' try(expect_no_warning(foo(), message = "problem"))
expect_no_error <- function(object, ..., message = NULL, class = NULL) {
  check_dots_empty()
  expect_no_("error", {{ object }}, regexp = message, class = class)
}

#' @export
#' @rdname expect_no_error
expect_no_warning <- function(object, ..., message = NULL, class = NULL) {
  check_dots_empty()
  expect_no_("warning", {{ object }}, regexp = message, class = class)
}

#' @export
#' @rdname expect_no_error
expect_no_message <- function(object, ..., message = NULL, class = NULL) {
  check_dots_empty()
  expect_no_("message", {{ object }}, regexp = message, class = class)
}

#' @export
#' @rdname expect_no_error
expect_no_condition <- function(object, ..., message = NULL, class = NULL) {
  check_dots_empty()
  expect_no_("condition", {{ object }}, regexp = message, class = class)
}

# Shared engine for all expect_no_*() expectations: evaluate `object`,
# intercept the first condition matching `base_class`/`class`/`regexp`,
# and fail if one was seen.
expect_no_ <- function(
  base_class,
  object,
  regexp = NULL,
  class = NULL,
  trace_env = caller_env()
) {
  # Bare expect_no_warning() deliberately ignores deprecation warnings.
  matcher <- cnd_matcher(
    base_class,
    class,
    regexp = regexp,
    ignore_deprecation = base_class == "warning" &&
      is.null(regexp) &&
      is.null(class)
  )

  matched_cnd <- NULL
  intercept <- function(code) {
    withRestarts(
      withCallingHandlers(
        code,
        condition = function(cnd) {
          # Record only the first matching condition; ignore the rest.
          if (!is.null(matched_cnd) || !matcher(cnd)) {
            return()
          }
          matched_cnd <<- cnd
          cnd_muffle(cnd)

          # A matching error must still terminate evaluation of `code`.
          if (inherits(cnd, "error")) {
            invokeRestart("done")
          }
        }
      ),
      done = function() {}
    )
  }

  captured <- quasi_capture(enquo(object), NULL, intercept)
  if (is.null(matched_cnd)) {
    pass()
  } else {
    expected_msg <- paste0(
      c(
        "Expected ", captured$lab, " not to throw any ", base_class, "s",
        if (!is.null(class)) paste0(" of class '", class, "'"),
        if (!is.null(regexp)) paste0(" matching pattern '", regexp, "'"),
        "."
      ),
      collapse = ""
    )
    fail(c(expected_msg, actual_condition(matched_cnd)), trace_env = trace_env)
  }
  invisible(captured$val)
}

# Indent every line of `x` by two spaces.
indent_lines <- function(x) {
  paste0("  ", gsub("\n", "\n  ", x))
}

# ==== testthat/R/teardown.R ====

# Module-level queue of teardown callbacks registered via teardown().
file_teardown_env <- new.env(parent = emptyenv())
file_teardown_env$queue <- list()

#' Run code before/after tests
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' We no longer recommend using `setup()` and `teardown()`; instead
#' we think it's better practice to use a **test fixture** as described in
#' `vignette("test-fixtures")`.
#'
#' Code in a `setup()` block is run immediately in a clean environment.
#' Code in a `teardown()` block is run upon completion of a test file,
#' even if it exits with an error. Multiple calls to `teardown()` will be
#' executed in the order they were created.
#'
#' @param code Code to evaluate
#' @param env Environment in which code will be evaluated. For expert
#' use only.
#' @export
#' @keywords internal
#' @examples
#' \dontrun{
#' # Old approach
#' tmp <- tempfile()
#' setup(writeLines("some test data", tmp))
#' teardown(unlink(tmp))
#' }
#'
#' # Now recommended:
#' local_test_data <- function(env = parent.frame()) {
#'   tmp <- tempfile()
#'   writeLines("some test data", tmp)
#'   withr::defer(unlink(tmp), env)
#'
#'   tmp
#' }
#' # Then call local_test_data() in your tests
teardown <- function(code, env = parent.frame()) {
  edition_deprecate(
    3,
    "teardown()",
    "Please use test fixtures instead see vignette('test-fixtures') for details"
  )

  # Capture `code` unevaluated as a zero-argument callback to run later.
  callback <- new_function(list(), enexpr(code), env = env)
  file_teardown_env$queue <- c(file_teardown_env$queue, list(callback))
  invisible()
}

#' @export
#' @rdname teardown
setup <- function(code, env = parent.frame()) {
  edition_deprecate(
    3,
    "setup()",
    "Please use test fixtures instead see vignette('test-fixtures') for details"
  )

  # Unlike teardown(), setup code runs immediately.
  result <- eval_tidy(enquo(code), env = env)
  invisible(result)
}

# Drop all registered teardown callbacks without running them.
teardown_reset <- function() {
  file_teardown_env$queue <- list()
}

# Run every registered teardown callback (errors are swallowed so one
# failing callback doesn't block the rest), then clear the queue.
teardown_run <- function(path = ".") {
  if (length(file_teardown_env$queue) == 0) {
    return()
  }

  old_wd <- setwd(path)
  withr::defer(setwd(old_wd))

  for (callback in file_teardown_env$queue) {
    try(callback())
  }
  teardown_reset()
  gc()
}

# ==== testthat/R/import-standalone-obj-type.R ====

# Standalone file: do not edit by hand
# Source:
# ----------------------------------------------------------------------
#
# ---
# repo: r-lib/rlang
# file: standalone-obj-type.R
# last-updated: 2022-10-04
# license: https://unlicense.org
# imports: rlang (>= 1.1.0)
# ---
#
# ## Changelog
#
# 2022-10-04:
# - `obj_type_friendly(value = TRUE)` now shows numeric scalars
#   literally.
# - `stop_friendly_type()` now takes `show_value`, passed to
#   `obj_type_friendly()` as the `value` argument.
#
# 2022-10-03:
# - Added `allow_na` and `allow_null` arguments.
# - `NULL` is now backticked.
# - Better friendly type for infinities and `NaN`.
# # 2022-09-16: # - Unprefixed usage of rlang functions with `rlang::` to # avoid onLoad issues when called from rlang (#1482). # # 2022-08-11: # - Prefixed usage of rlang functions with `rlang::`. # # 2022-06-22: # - `friendly_type_of()` is now `obj_type_friendly()`. # - Added `obj_type_oo()`. # # 2021-12-20: # - Added support for scalar values and empty vectors. # - Added `stop_input_type()` # # 2021-06-30: # - Added support for missing arguments. # # 2021-04-19: # - Added support for matrices and arrays (#141). # - Added documentation. # - Added changelog. # # nocov start #' Return English-friendly type #' @param x Any R object. #' @param value Whether to describe the value of `x`. Special values #' like `NA` or `""` are always described. #' @param length Whether to mention the length of vectors and lists. #' @return A string describing the type. Starts with an indefinite #' article, e.g. "an integer vector". #' @noRd obj_type_friendly <- function(x, value = TRUE) { if (is_missing(x)) { return("absent") } if (is.object(x)) { if (inherits(x, "quosure")) { type <- "quosure" } else { type <- paste(class(x), collapse = "/") } return(sprintf("a <%s> object", type)) } if (!is_vector(x)) { return(.rlang_as_friendly_type(typeof(x))) } n_dim <- length(dim(x)) if (!n_dim) { if (!is_list(x) && length(x) == 1) { if (is_na(x)) { return(switch( typeof(x), logical = "`NA`", integer = "an integer `NA`", double = if (is.nan(x)) { "`NaN`" } else { "a numeric `NA`" }, complex = "a complex `NA`", character = "a character `NA`", .rlang_stop_unexpected_typeof(x) )) } show_infinites <- function(x) { if (x > 0) { "`Inf`" } else { "`-Inf`" } } str_encode <- function(x, width = 30, ...) { if (nchar(x) > width) { x <- substr(x, 1, width - 3) x <- paste0(x, "...") } encodeString(x, ...) 
} if (value) { if (is.numeric(x) && is.infinite(x)) { return(show_infinites(x)) } if (is.numeric(x) || is.complex(x)) { number <- as.character(round(x, 2)) what <- if (is.complex(x)) "the complex number" else "the number" return(paste(what, number)) } return(switch( typeof(x), logical = if (x) "`TRUE`" else "`FALSE`", character = { what <- if (nzchar(x)) "the string" else "the empty string" paste(what, str_encode(x, quote = "\"")) }, raw = paste("the raw value", as.character(x)), .rlang_stop_unexpected_typeof(x) )) } return(switch( typeof(x), logical = "a logical value", integer = "an integer", double = if (is.infinite(x)) show_infinites(x) else "a number", complex = "a complex number", character = if (nzchar(x)) "a string" else "\"\"", raw = "a raw value", .rlang_stop_unexpected_typeof(x) )) } if (length(x) == 0) { return(switch( typeof(x), logical = "an empty logical vector", integer = "an empty integer vector", double = "an empty numeric vector", complex = "an empty complex vector", character = "an empty character vector", raw = "an empty raw vector", list = "an empty list", .rlang_stop_unexpected_typeof(x) )) } } vec_type_friendly(x) } vec_type_friendly <- function(x, length = FALSE) { if (!is_vector(x)) { abort("`x` must be a vector.") } type <- typeof(x) n_dim <- length(dim(x)) add_length <- function(type) { if (length && !n_dim) { paste0(type, sprintf(" of length %s", length(x))) } else { type } } if (type == "list") { if (n_dim < 2) { return(add_length("a list")) } else if (is.data.frame(x)) { return("a data frame") } else if (n_dim == 2) { return("a list matrix") } else { return("a list array") } } type <- switch( type, logical = "a logical %s", integer = "an integer %s", numeric = , double = "a double %s", complex = "a complex %s", character = "a character %s", raw = "a raw %s", type = paste0("a ", type, " %s") ) if (n_dim < 2) { kind <- "vector" } else if (n_dim == 2) { kind <- "matrix" } else { kind <- "array" } out <- sprintf(type, kind) if (n_dim >= 
2) { out } else { add_length(out) } } .rlang_as_friendly_type <- function(type) { switch( type, list = "a list", NULL = "`NULL`", environment = "an environment", externalptr = "a pointer", weakref = "a weak reference", S4 = "an S4 object", name = , symbol = "a symbol", language = "a call", pairlist = "a pairlist node", expression = "an expression vector", char = "an internal string", promise = "an internal promise", ... = "an internal dots object", any = "an internal `any` object", bytecode = "an internal bytecode object", primitive = , builtin = , special = "a primitive function", closure = "a function", type ) } .rlang_stop_unexpected_typeof <- function(x, call = caller_env()) { abort( sprintf("Unexpected type <%s>.", typeof(x)), call = call ) } #' Return OO type #' @param x Any R object. #' @return One of `"bare"` (for non-OO objects), `"S3"`, `"S4"`, #' `"R6"`, or `"R7"`. #' @noRd obj_type_oo <- function(x) { if (!is.object(x)) { return("bare") } class <- inherits(x, c("R6", "R7_object"), which = TRUE) if (class[[1]]) { "R6" } else if (class[[2]]) { "R7" } else if (isS4(x)) { "S4" } else { "S3" } } #' @param x The object type which does not conform to `what`. Its #' `obj_type_friendly()` is taken and mentioned in the error message. #' @param what The friendly expected type as a string. Can be a #' character vector of expected types, in which case the error #' message mentions all of them in an "or" enumeration. #' @param show_value Passed to `value` argument of `obj_type_friendly()`. #' @param ... Arguments passed to [abort()]. 
#' @inheritParams args_error_context
#' @noRd
stop_input_type <- function(x,
                            what,
                            ...,
                            allow_na = FALSE,
                            allow_null = FALSE,
                            show_value = TRUE,
                            arg = caller_arg(x),
                            call = caller_env()) {
  # NOTE(review): this function is part of the vendored rlang standalone
  # file ("do not edit by hand"); left functionally untouched.
  # From standalone-cli.R
  cli <- env_get_list(
    nms = c("format_arg", "format_code"),
    last = topenv(),
    default = function(x) sprintf("`%s`", x),
    inherit = TRUE
  )

  if (allow_na) {
    what <- c(what, cli$format_code("NA"))
  }
  if (allow_null) {
    what <- c(what, cli$format_code("NULL"))
  }
  if (length(what)) {
    what <- oxford_comma(what)
  }

  message <- sprintf(
    "%s must be %s, not %s.",
    cli$format_arg(arg),
    what,
    obj_type_friendly(x, value = show_value)
  )
  abort(message, ..., call = call, arg = arg)
}

oxford_comma <- function(chr, sep = ", ", final = "or") {
  n <- length(chr)

  if (n < 2) {
    return(chr)
  }

  head <- chr[seq_len(n - 1)]
  last <- chr[n]

  head <- paste(head, collapse = sep)

  # Write a or b. But a, b, or c.
  if (n > 2) {
    paste0(head, sep, final, " ", last)
  } else {
    paste0(head, " ", final, " ", last)
  }
}

# nocov end

# ==== testthat/R/reporter-fail.R ====

#' Fail if any tests fail
#'
#' This reporter will simply throw an error if any of the tests failed. It is
#' best combined with another reporter, such as the
#' [SummaryReporter].
#'
#' @export
#' @family reporters
FailReporter <- R6::R6Class(
  "FailReporter",
  inherit = Reporter,
  public = list(
    failed = FALSE,

    initialize = function(...) {
      self$capabilities$parallel_support <- TRUE
      super$initialize(...)
    },

    add_result = function(context, test, result) {
      # Latch: once any expectation is broken, stay failed.
      self$failed <- self$failed || expectation_broken(result)
    },

    end_reporter = function() {
      if (self$failed) {
        cli::cli_abort("Failures detected.")
      }
    }
  )
)

# ==== testthat/R/test-state.R ====

#' Check for global state changes
#'
#' @description
#' One of the most pernicious challenges to debug is when a test runs fine
#' in your test suite, but fails when you run it interactively (or similarly,
#' it fails randomly when running your tests in parallel). One of the most
#' common causes of this problem is accidentally changing global state in a
#' previous test (e.g. changing an option, an environment variable, or the
#' working directory). This is hard to debug, because it's very hard to figure
#' out which test made the change.
#'
#' Luckily testthat provides a tool to figure out if tests are changing global
#' state. You can register a state inspector with `set_state_inspector()` and
#' testthat will run it before and after each test, store the results, then
#' report if there are any differences. For example, if you wanted to see if
#' any of your tests were changing options or environment variables, you could
#' put this code in `tests/testthat/helper-state.R`:
#'
#' ```R
#' set_state_inspector(function() {
#'   list(
#'     options = options(),
#'     envvars = Sys.getenv()
#'   )
#' })
#' ```
#'
#' (You might discover other packages outside your control are changing
#' the global state, in which case you might want to modify this function
#' to ignore those values.)
#'
#' Other problems that can be troublesome to resolve are CRAN check notes that
#' report things like connections being left open. You can easily debug
#' that problem with:
#'
#' ```R
#' set_state_inspector(function() {
#'   getAllConnections()
#' })
#' ```
#'
#' @export
#' @param callback Either a zero-argument function that returns an object
#'   capturing global state that you're interested in, or `NULL`.
#' @inheritParams waldo::compare
set_state_inspector <- function(callback, tolerance = testthat_tolerance()) {
  # The inspector must be NULL (disable) or a zero-argument function.
  if (
    !is.null(callback) &&
      !(is.function(callback) && length(formals(callback)) == 0)
  ) {
    cli::cli_abort("{.arg callback} must be a zero-arg function, or NULL")
  }

  the$state_inspector <- callback
  the$state_inspector_tolerance <- tolerance
  invisible()
}

# Compare global state captured before and after a test. Returns NULL when
# nothing changed, otherwise a warning condition describing the differences.
testthat_state_condition <- function(before, after, call) {
  diffs <- waldo_compare(
    before,
    after,
    x_arg = "before",
    y_arg = "after",
    tolerance = the$state_inspector_tolerance
  )
  if (length(diffs) == 0) {
    return(NULL)
  }

  srcref <- attr(call, "srcref")
  warning_cnd(
    message = c("Global state has changed:", set_names(format(diffs), "")),
    srcref = srcref
  )
}

# Snapshot global state via the registered inspector (NULL when none).
inspect_state <- function() {
  if (is.null(the$state_inspector)) {
    NULL
  } else {
    the$state_inspector()
  }
}

# ==== testthat/R/reporter-tap.R ====

#' Report results in TAP format
#'
#' This reporter will output results in the Test Anything Protocol (TAP),
#' a simple text-based interface between testing modules in a test harness.
#' For more information about TAP, see http://testanything.org
#'
#' @export
#' @family reporters
TapReporter <- R6::R6Class(
  "TapReporter",
  inherit = Reporter,
  public = list(
    results = list(),
    n = 0L,
    has_tests = FALSE,
    contexts = NA_character_,

    initialize = function(...) {
      super$initialize(...)
      self$capabilities$parallel_support <- TRUE
    },

    start_context = function(context) {
      # Remember which result index each context starts at.
      self$contexts[self$n + 1] <- context
    },

    add_result = function(context, test, result) {
      self$has_tests <- TRUE
      self$n <- self$n + 1L
      self$results[[self$n]] <- result
    },

    end_reporter = function() {
      if (!self$has_tests) {
        return()
      }

      # TAP plan line.
      self$cat_line("1..", self$n)
      # seq_len() rather than 1:n so the sequence is safely empty if the
      # has_tests guard is ever relaxed.
      for (i in seq_len(self$n)) {
        if (!is.na(self$contexts[i])) {
          self$cat_line("# Context ", self$contexts[i])
        }

        result <- self$results[[i]]
        if (expectation_success(result)) {
          self$cat_line("ok ", i, " ", result$test)
        } else if (expectation_broken(result)) {
          self$cat_line("not ok ", i, " ", result$test)
          # Indent failure details so TAP consumers treat them as diagnostics.
          msg <- gsub("(^|\n)", "\\1  ", format(result))
          self$cat_line(msg)
        } else {
          # Skips and warnings are reported as TAP directives.
          self$cat_line(
            "ok ", i, " # ", toupper(expectation_type(result)), " ",
            format(result)
          )
        }
      }
    }
  )
)

# ==== testthat/R/srcrefs.R ====

# Locate the srcref of the expectation call currently being evaluated,
# scoped to the file loaded by source_file(). Returns NULL interactively.
find_expectation_srcref <- function(
  test_code_frame = NULL,
  top = caller_env()
) {
  # It's not possible to give useful srcrefs interactively so don't even try
  path <- getOption("testthat_path")
  if (is.null(path)) {
    return(NULL)
  }

  # Scope our search to the current file loaded with source_file()
  file_srcref <- srcref(srcfile(path), c(1, 1, 1e5, 1e5))

  # Now attempt to narrow the scope to a call that leads to test_code(). That's
  # usually test_that() but might be describe(), it(), or another wrapper.
  testthat_srcref <- find_srcref(
    top = test_code_frame,
    container = file_srcref
  )

  # Now we can find the bottom-most call with a srcref that's inside that scope
  call_srcref <- find_srcref(
    top = top,
    bottom = test_code_frame,
    container = testthat_srcref %||% file_srcref
  )

  # If we can't find that we fall back to the test
  call_srcref %||% testthat_srcref
}

# Walk the call stack from `top` down to `bottom`, returning the first call
# that carries a srcref inside `container` (or any srcref when container is
# NULL). Returns NULL when no suitable srcref is found.
find_srcref <- function(bottom = NULL, top = caller_env(), container = NULL) {
  idx <- sys_index(bottom, top)
  calls <- sys.calls()[rev(idx)]

  for (call in calls) {
    srcref <- attr(call, "srcref")
    if (!is.null(srcref)) {
      if (is.null(container) || srcref_inside(srcref, container)) {
        return(srcref)
      }
    }
  }
  NULL
}

# Is srcref `needle` entirely contained within srcref `haystack`?
srcref_inside <- function(needle, haystack) {
  stopifnot(inherits(needle, "srcref"), inherits(haystack, "srcref"))

  # Different files can never nest.
  needle_file <- attr(needle, "srcfile")$filename
  haystack_file <- attr(haystack, "srcfile")$filename
  if (!identical(needle_file, haystack_file)) {
    return(FALSE)
  }

  # Compare (line, column) pairs lexicographically.
  sign_pair <- function(x, y) {
    diff <- y - x
    if (diff[1] == 0) sign(diff[2]) else sign(diff[1])
  }

  sign_pair(needle[1:2], haystack[1:2]) <= 0 &&
    sign_pair(needle[3:4], haystack[3:4]) >= 0
}

# Indices of sys.frames() between `bottom` and `top` (inclusive).
sys_index <- function(bottom = NULL, top = caller_env()) {
  frames <- sys.frames()

  if (is.null(bottom)) {
    bottom_idx <- 1
  } else {
    bottom_idx <- Position(function(env) identical(bottom, env), frames)
    if (is.na(bottom_idx)) {
      cli::cli_abort("Can't find {.arg bottom} on stack.")
    }
  }

  top_idx <- Position(function(env) identical(top, env), frames)
  if (is.na(top_idx)) {
    cli::cli_abort("Can't find {.arg top} on stack.")
  }

  seq2(bottom_idx, top_idx)
}

# ==== testthat/R/old-school.R ====

#' Expect that a condition holds.
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' An old style of testing that's no longer encouraged.
#'
#' @section 3rd edition:
#' `r lifecycle::badge("deprecated")`
#'
#' This style of testing is formally deprecated as of the 3rd edition.
#' Use a more specific `expect_` function instead.
#'
#' @param object Object to test.
#'
#'   Supports limited unquoting to make it easier to generate readable failures
#'   within a function or for loop. See [quasi_label] for more details.
#' @param condition, a function that returns whether or not the condition
#'   is met, and if not, an error message to display.
#' @param label Used to customise failure messages. For expert use only.
#' @param info Extra information to be included in the message. This argument
#'   is soft-deprecated and should not be used in new code. Instead see
#'   alternatives in [quasi_label].
#' @return the (internal) expectation result as an invisible list
#' @keywords internal
#' @export
#' @seealso [fail()] for an expectation that always fails.
#' @examples
#' expect_that(5 * 2, equals(10))
#' expect_that(sqrt(2) ^ 2, equals(2))
#' \dontrun{
#' expect_that(sqrt(2) ^ 2, is_identical_to(2))
#' }
expect_that <- function(object, condition, info = NULL, label = NULL) {
  edition_deprecate(3, "expect_that()")
  # `info` and `label` are retained for backward compatibility but unused.
  condition(object)
}

#' Old-style expectations.
#'
#' @description
#' `r lifecycle::badge("superseded")`
#'
#' Initial testthat used a style of testing that looked like
#' `expect_that(a, equals(b)))` this allowed expectations to read like
#' English sentences, but was verbose and a bit too cutesy. This style
#' will continue to work but has been soft-deprecated - it is no longer
#' documented, and new expectations will only use the new style
#' `expect_equal(a, b)`.
#'
#' @name oldskool
#' @keywords internal
NULL

#' @export
#' @rdname oldskool
is_a <- function(class) {
  function(x) {
    expect_is(x, class)
  }
}

#' @export
#' @rdname oldskool
has_names <- function(expected, ignore.order = FALSE, ignore.case = FALSE) {
  function(x) {
    expect_named(
      x,
      expected = expected,
      ignore.order = ignore.order,
      ignore.case = ignore.case
    )
  }
}

#' @export
#' @rdname oldskool
is_less_than <- function(expected, label = NULL, ...) {
  function(x) {
    expect_lt(x, expected)
  }
}

#' @export
#' @rdname oldskool
is_more_than <- function(expected, label = NULL, ...) {
  function(x) {
    expect_gt(x, expected)
  }
}

#' @export
#' @rdname oldskool
equals <- function(expected, label = NULL, ...) {
  function(x) {
    expect_equal(x, expected, ..., expected.label = label)
  }
}

#' @export
#' @rdname oldskool
is_equivalent_to <- function(expected, label = NULL) {
  function(x) {
    expect_equivalent(x, expected, expected.label = label)
  }
}

#' @export
#' @rdname oldskool
is_identical_to <- function(expected, label = NULL) {
  function(x) {
    expect_identical(x, expected, expected.label = label)
  }
}

#' @export
#' @rdname oldskool
equals_reference <- function(file, label = NULL, ...) {
  function(x) {
    expect_known_value(x, file, expected.label = label, ...)
  }
}

#' @export
#' @rdname oldskool
shows_message <- function(regexp = NULL, all = FALSE, ...) {
  function(x) {
    expect_message(x, regexp = regexp, all = all, ...)
  }
}

#' @export
#' @rdname oldskool
gives_warning <- function(regexp = NULL, all = FALSE, ...) {
  function(x) {
    expect_warning(x, regexp = regexp, all = all, ...)
  }
}

#' @export
#' @rdname oldskool
prints_text <- function(regexp = NULL, ...) {
  function(x) {
    expect_output(x, regexp, ...)
  }
}

#' @export
#' @rdname oldskool
throws_error <- function(regexp = NULL, ...) {
  function(x) {
    expect_error(x, regexp, ...)
  }
}

#' Does code take less than the expected amount of time to run?
#'
#' This is useful for performance regression testing.
#'
#' @keywords internal
#' @export
#' @param amount maximum duration in seconds
takes_less_than <- function(amount) {
  cli::cli_warn(
    "{.fn takes_less_than} is deprecated because it is stochastic and unreliable."
  )

  function(expr) {
    elapsed <- system.time(force(expr))["elapsed"]
    if (elapsed >= amount) {
      fail(paste0("took ", elapsed, " seconds, which is more than ", amount))
    } else {
      pass()
    }
  }
}

#' Negate an expectation
#'
#' This negates an expectation, making it possible to express that you
#' want the opposite of a standard expectation. This function is deprecated
#' and will be removed in a future version.
#'
#' @param f an existing expectation function
#' @keywords internal
#' @export
not <- function(f) {
  cli::cli_warn("{.fn not} is deprecated.")
  stopifnot(is.function(f))

  # Turn a success into a failure and vice versa.
  flip <- function(result) {
    if (expectation_success(result)) {
      fail(paste0("NOT(", result$message, ")"), srcref = result$srcref)
    } else {
      pass()
    }
  }

  function(...) {
    flip(capture_expectation(f(...)))
  }
}

# ==== testthat/R/reporter-silent.R ====

#' Silently collect and all expectations
#'
#' This reporter quietly runs all tests, simply gathering all expectations.
#' This is helpful for programmatically inspecting errors after a test run.
#' You can retrieve the results with `$expectations()`.
#'
#' @export
#' @family reporters
SilentReporter <- R6::R6Class(
  "SilentReporter",
  inherit = Reporter,
  public = list(
    .expectations = NULL,

    initialize = function(...) {
      super$initialize(...)
      self$capabilities$parallel_support <- TRUE
      self$.expectations <- Stack$new()
    },

    add_result = function(context, test, result) {
      self$.expectations$push(result)
    },

    expectations = function() {
      self$.expectations$as_list()
    }
  )
)

# Useful for testing test_that() and friends which otherwise swallow
# all expectations by design
capture_expectations <- function(code) {
  collector <- SilentReporter$new()
  with_reporter(collector, code)
  collector$expectations()
}

# ==== testthat/R/reporter-llm.R ====

#' Report test progress for LLMs
#'
#' @description
#' `LlmReporter` is designed for use with Large Language Models (LLMs).
#' It reports problems (warnings, skips, errors, and failures) as they
#' occur and the total number of successes at the end.
#'
#' `LlmReporter` is used by default when tests are run by a coding agent.
#' Currently we detect Claude Code, Cursor, and Gemini CLI.
#' If using another tool, configure it to set env var `AGENT=1`.
#'
#' @export
#' @family reporters
LlmReporter <- R6::R6Class(
  "LlmReporter",
  inherit = ProgressReporter,
  public = list(
    initialize = function(max_failures = testthat_max_fails(), ...) {
      super$initialize(
        show_praise = FALSE,
        max_failures = max_failures,
        ...
      )
      # Keep output narrow and free of IDE/terminal decorations.
      self$width <- 20
      self$rstudio <- FALSE
      self$hyperlinks <- FALSE
    },

    # No header
    show_header = function() {},

    # No status updates
    show_status = function(complete = FALSE, time = 0, pad = FALSE) {},

    add_result = function(context, test, result) {
      # Stop accumulating once the failure budget is exhausted.
      if (self$is_full()) {
        return()
      }

      if (expectation_broken(result)) {
        self$n_fail <- self$n_fail + 1
        self$report_issue(result)
      } else if (expectation_skip(result)) {
        self$n_skip <- self$n_skip + 1
        self$report_issue(result)
      } else if (expectation_warning(result)) {
        self$n_warn <- self$n_warn + 1
        self$report_issue(result)
      } else {
        # Passing tests are counted but not printed.
        self$n_ok <- self$n_ok + 1
      }
    },

    # Print one problem (failure/skip/warning) with a horizontal divider.
    report_issue = function(result) {
      self$local_user_output()

      kind <- toupper(expectation_type(result))
      heading <- paste0(kind, ": ", expectation_location(result))
      divider <- strrep("-", max(50 - nchar(heading), 10))
      self$cat_line(heading, " ", divider)
      self$cat_line(format(result))
      self$cat_line()
    },

    end_context = function(context) {
      if (self$is_full()) {
        self$report_full()
      }
    },

    end_reporter = function() {
      if (self$is_full()) {
        return()
      }
      self$cat_line(paste_c(
        "[ ",
        c("FAIL ", self$n_fail, " | "),
        c("WARN ", self$n_warn, " | "),
        c("SKIP ", self$n_skip, " | "),
        c("PASS ", self$n_ok),
        " ]"
      ))
    }
  )
)

# TRUE when any known coding-agent environment variable is set.
is_llm <- function() {
  agent_vars <- c("AGENT", "CLAUDECODE", "GEMINI_CLI", "CURSOR_AGENT")
  any(nzchar(Sys.getenv(agent_vars)))
}
testthat/R/local.R0000644000176200001440000001677515056632045013556 0ustar liggesusers#' Temporarily set options for maximum reproducibility #' #' @description #' `local_test_context()` is run automatically by `test_that()` but you may #' want to run it yourself if you want to replicate test results interactively. #' If run inside a function, the effects are automatically reversed when the #' function exits; if running in the global environment, use #' [withr::deferred_run()] to undo. #' #' `local_reproducible_output()` is run automatically by `test_that()` in the #' 3rd edition. You might want to call it to override the the default settings #' inside a test, if you want to test Unicode, coloured output, or a #' non-standard width. #' #' @details #' `local_test_context()` sets `TESTTHAT = "true"`, which ensures that #' [is_testing()] returns `TRUE` and allows code to tell if it is run by #' testthat. #' #' In the third edition, `local_test_context()` also calls #' `local_reproducible_output()` which temporary sets the following options: #' #' * `cli.dynamic = FALSE` so that tests assume that they are not run in #' a dynamic console (i.e. one where you can move the cursor around). #' * `cli.unicode` (default: `FALSE`) so that the cli package never generates #' unicode output (normally cli uses unicode on Linux/Mac but not Windows). #' Windows can't easily save unicode output to disk, so it must be set to #' false for consistency. #' * `cli.condition_width = Inf` so that new lines introduced while #' width-wrapping condition messages don't interfere with message matching. #' * `crayon.enabled` (default: `FALSE`) suppresses ANSI colours generated by #' the cli and crayon packages (normally colours are used if cli detects #' that you're in a terminal that supports colour). #' * `cli.num_colors` (default: `1L`) Same as the crayon option. 
#' * `lifecycle_verbosity = "warning"` so that every lifecycle problem always
#'   generates a warning (otherwise deprecated functions don't generate a
#'   warning every time).
#' * `max.print = 99999` so the same number of values are printed.
#' * `OutDec = "."` so numbers always use `.` as the decimal point
#'   (European users sometimes set `OutDec = ","`).
#' * `rlang_interactive = FALSE` so that [rlang::is_interactive()] returns
#'   `FALSE`, and code that uses it pretends you're in a non-interactive
#'   environment.
#' * `useFancyQuotes = FALSE` so base R functions always use regular (straight)
#'   quotes (otherwise the default is locale dependent, see [sQuote()] for
#'   details).
#' * `width` (default: 80) to control the width of printed output (usually this
#'   varies with the size of your console).
#'
#' And modifies the following env vars:
#'
#' * Unsets `RSTUDIO`, which ensures that RStudio is never detected as running.
#' * Sets `LANGUAGE = "en"`, which ensures that no message translation occurs.
#'
#' Finally, it sets the collation locale to "C", which ensures that character
#' sorting is the same regardless of system locale.
#'
#' @export
#' @param .env Environment to use for scoping; expert use only.
#' @examples
#' local({
#'   local_test_context()
#'   cat(cli::col_blue("Text will not be colored"))
#'   cat(cli::symbol$ellipsis)
#'   cat("\n")
#' })
local_test_context <- function(.env = parent.frame()) {
  # Mark the session as "inside testthat" so is_testing() returns TRUE;
  # both env vars are scoped to .env and reverted when it exits.
  withr::local_envvar(
    "_R_CHECK_BROWSER_NONINTERACTIVE_" = "true",
    TESTTHAT = "true",
    .local_envir = .env
  )
  # Reproducible output (colours, width, locale, ...) is a 3rd-edition-only
  # behaviour.
  if (edition_get() >= 3) {
    local_reproducible_output(.env = .env)
  }
}

#' @export
#' @param width Value of the `"width"` option.
#' @param crayon Determines whether or not crayon (now cli) colour
#'   should be applied.
#' @param unicode Value of the `"cli.unicode"` option.
#'   The test is skipped if `` l10n_info()$`UTF-8` `` is `FALSE`.
#' @param rstudio Should we pretend that we're inside of RStudio?
#' @param hyperlinks Should we use ANSI hyperlinks.
#' @param lang Optionally, supply a BCP47 language code to set the language
#'   used for translating error messages. This is a lower case two letter
#'   [ISO 639 country code](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes),
#'   optionally followed by "_" or "-" and an upper case two letter
#'   [ISO 3166 region code](https://en.wikipedia.org/wiki/ISO_3166-2).
#' @rdname local_test_context
#' @examples
#' test_that("test ellipsis", {
#'   local_reproducible_output(unicode = FALSE)
#'   expect_equal(cli::symbol$ellipsis, "...")
#'
#'   local_reproducible_output(unicode = TRUE)
#'   expect_equal(cli::symbol$ellipsis, "\u2026")
#' })
local_reproducible_output <- function(
  width = 80,
  crayon = FALSE,
  unicode = FALSE,
  rstudio = FALSE,
  hyperlinks = FALSE,
  lang = "C",
  .env = parent.frame()
) {
  if (unicode) {
    # If you force unicode display, you _must_ skip the test on non-utf8
    # locales; otherwise it's guaranteed to fail
    skip_if(!l10n_info()$`UTF-8`, "non utf8 locale")
  }

  local_width(width = width, .env = .env)
  # NB: no trailing comma after `.local_envir` -- an empty trailing argument
  # in `...` makes this call error at runtime ("argument is empty").
  withr::local_options(
    crayon.enabled = crayon,
    cli.hyperlink = hyperlinks,
    cli.hyperlink_run = hyperlinks,
    cli.hyperlink_help = hyperlinks,
    cli.hyperlink_vignette = hyperlinks,
    cli.dynamic = FALSE,
    cli.unicode = unicode,
    cli.condition_width = Inf,
    cli.num_colors = if (crayon) 8L else 1L,
    useFancyQuotes = FALSE,
    lifecycle_verbosity = "warning",
    OutDec = ".",
    rlang_interactive = FALSE,
    max.print = 99999,
    .local_envir = .env
  )
  # Pretend we are (or are not) inside RStudio by setting/unsetting the env
  # vars RStudio detection relies on (NA unsets a variable).
  withr::local_envvar(
    RSTUDIO = if (rstudio) 1 else NA,
    RSTUDIO_SESSION_PID = if (rstudio) Sys.getpid() else NA,
    RSTUDIO_CHILD_PROCESS_PANE = if (rstudio) "build" else NA,
    RSTUDIO_CLI_HYPERLINKS = if (rstudio) 1 else NA,
    .local_envir = .env
  )

  withr::local_language(lang, .local_envir = .env)
  withr::local_collate("C", .local_envir = .env)
}

# Route user output through the current reporter (if any), so text produced
# while evaluating an expectation ends up in the right place.
local_reporter_output <- function(.env = parent.frame()) {
  reporter <- get_reporter()
  if (!is.null(reporter)) {
    reporter$local_user_output(.env)
  }
}

waldo_compare <- function(x, y, ..., x_arg = "x", y_arg = "y") {
  #
Need to very carefully isolate this change to this function - can not set # in expectation functions because part of expectation handling bubbles # up through calling handlers, which are run before on.exit() if (!is_snapshot()) { local_reporter_output() } waldo::compare(x, y, ..., x_arg = x_arg, y_arg = y_arg) } local_width <- function(width = 80, .env = parent.frame()) { withr::local_options(width = width, cli.width = width, .local_envir = .env) withr::local_envvar(RSTUDIO_CONSOLE_WIDTH = width, .local_envir = .env) } #' Locally set test directory options #' #' For expert use only. #' #' @param path Path to directory of files #' @param package Optional package name, if known. #' @export #' @keywords internal local_test_directory <- function(path, package = NULL, .env = parent.frame()) { # Set edition before changing working directory in case path is relative local_edition(find_edition(path, package), .env = .env) # Capture current working directory so we can use for relative paths wd <- getwd() withr::local_dir(path, .local_envir = .env) withr::local_envvar( R_TESTS = "", TESTTHAT = "true", TESTTHAT_PKG = package, TESTTHAT_WD = wd, .local_envir = .env ) } local_interactive_reporter <- function(.env = parent.frame()) { local_assume_not_on_cran(.env) withr::local_options(testthat_interactive = TRUE, .local_envir = .env) # Use edition from working directory local_edition(find_edition("."), .env = .env) # Use StopReporter reporter <- StopReporter$new() old <- set_reporter(reporter) withr::defer(reporter$end_reporter(), envir = .env) withr::defer(set_reporter(old), envir = .env) reporter } testthat/R/try-again.R0000644000176200001440000000233315104404205014325 0ustar liggesusers#' Evaluate an expectation multiple times until it succeeds #' #' If you have a flaky test, you can use `try_again()` to run it a few times #' until it succeeds. In most cases, you are better fixing the underlying #' cause of the flakeyness, but sometimes that's not possible. 
#'
#' @param times Number of times to retry.
#' @param code Code to evaluate.
#' @export
#' @examples
#' usually_return_1 <- function(i) {
#'   if (runif(1) < 0.1) 0 else 1
#' }
#'
#' \dontrun{
#' # 10% chance of failure:
#' expect_equal(usually_return_1(), 1)
#'
#' # 1% chance of failure:
#' try_again(1, expect_equal(usually_return_1(), 1))
#'
#' # 0.1% chance of failure:
#' try_again(2, expect_equal(usually_return_1(), 1))
#' }
try_again <- function(times, code) {
  check_number_whole(times, min = 1)
  # Capture `code` as a quosure so it can be re-evaluated, in its original
  # environment, on every attempt.
  code <- enquo(code)

  i <- 1
  while (i <= times) {
    # A successful evaluation returns immediately from try_again(); an
    # expectation failure or error is reported and we fall through to the
    # next attempt.
    tryCatch(
      return(eval(get_expr(code), get_env(code))),
      expectation_failure = function(cnd) {
        cli::cli_inform(c(i = "Expectation failed; trying again ({i})..."))
        NULL
      },
      error = function(cnd) {
        cli::cli_inform(c(i = "Expectation errored; trying again ({i})..."))
        NULL
      }
    )
    i <- i + 1
  }

  # Final attempt: evaluated outside tryCatch() so that a failure or error
  # propagates to the caller.
  eval(get_expr(code), get_env(code))
}
testthat/R/test-files.R0000644000176200001440000003100415127731656014530 0ustar liggesusers#' Run all tests in a directory
#'
#' @description
#' This function is the low-level workhorse that powers [test_local()] and
#' [test_package()]. Generally, you should not call this function directly.
#' In particular, you are responsible for ensuring that the functions to test
#' are available in the test `env` (e.g. via `load_package`).
#'
#' See `vignette("special-files")` to learn more about the conventions for test,
#' helper, and setup files that testthat uses, and what you might use each for.
#'
#' @section Environments:
#' Each test is run in a clean environment to keep tests as isolated as
#' possible. For package tests, that environment inherits from the package's
#' namespace environment, so that tests can access internal functions
#' and objects.
#'
#' @param path Path to directory containing tests.
#' @param package If these tests belong to a package, the name of the package.
#' @param filter If not `NULL`, only tests with file names matching this
#'   regular expression will be executed. 
Matching is performed on the file #' name after it's stripped of `"test-"` and `".R"`. #' @param env Environment in which to execute the tests. Expert use only. #' @param ... Additional arguments passed to [grepl()] to control filtering. #' @param load_helpers Source helper files before running the tests? #' @param stop_on_failure If `TRUE`, throw an error if any tests fail. #' @param stop_on_warning If `TRUE`, throw an error if any tests generate #' warnings. #' @param load_package Strategy to use for load package code: #' * "none", the default, doesn't load the package. #' * "installed", uses [library()] to load an installed package. #' * "source", uses [pkgload::load_all()] to a source package. #' To configure the arguments passed to `load_all()`, add this #' field in your DESCRIPTION file: #' #' ``` #' Config/testthat/load-all: list(export_all = FALSE, helpers = FALSE) #' ``` #' @return A list (invisibly) containing data about the test results. #' @inheritParams with_reporter #' @inheritParams source_file #' @export test_dir <- function( path, filter = NULL, reporter = NULL, env = NULL, ..., load_helpers = TRUE, stop_on_failure = TRUE, stop_on_warning = FALSE, package = NULL, load_package = c("none", "installed", "source"), shuffle = FALSE ) { load_package <- arg_match(load_package) start_first <- find_test_start_first(path, load_package, package) test_paths <- find_test_scripts( path, filter = filter, ..., full.names = FALSE, start_first = start_first ) if (length(test_paths) == 0) { cli::cli_abort("No test files found.") } want_parallel <- find_parallel(path, load_package, package) && length(test_paths) > 1 reporter <- find_reporter(reporter %||% default_reporter(want_parallel)) parallel <- want_parallel && reporter$capabilities$parallel_support test_files( test_dir = path, test_paths = test_paths, test_package = package, reporter = reporter, load_helpers = load_helpers, env = env, stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning, 
load_package = load_package, parallel = parallel, shuffle = shuffle ) } #' Run tests in a single file #' #' Helper, setup, and teardown files located in the same directory as the #' test will also be run. See `vignette("special-files")` for details. #' #' @inherit test_dir return params #' @inheritSection test_dir Environments #' @param path Path to file. #' @param ... Additional parameters passed on to `test_dir()` #' @param desc Optionally, supply a string here to run only a single #' test (`test_that()` or `describe()`) with this `desc`ription. #' @export #' @examples #' path <- testthat_example("success") #' test_file(path) #' test_file(path, desc = "some tests have warnings") #' test_file(path, reporter = "minimal") test_file <- function( path, reporter = default_compact_reporter(), desc = NULL, package = NULL, shuffle = FALSE, ... ) { if (!file.exists(path)) { cli::cli_abort("{.arg path} does not exist.") } test_files( test_dir = dirname(path), test_package = package, test_paths = basename(path), reporter = reporter, desc = desc, shuffle = shuffle, ... 
) } test_files <- function( test_dir, test_package, test_paths, load_helpers = TRUE, reporter = default_reporter(), env = NULL, stop_on_failure = FALSE, stop_on_warning = FALSE, desc = NULL, load_package = c("none", "installed", "source"), parallel = FALSE, shuffle = FALSE, error_call = caller_env() ) { # Must keep these two blocks in sync if (parallel) { test_files_parallel( test_dir = test_dir, test_package = test_package, test_paths = test_paths, load_helpers = load_helpers, reporter = reporter, env = env, stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning, load_package = load_package, shuffle = shuffle ) } else { test_files_serial( test_dir = test_dir, test_package = test_package, test_paths = test_paths, load_helpers = load_helpers, reporter = reporter, env = env, stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning, desc = desc, load_package = load_package, shuffle = shuffle, error_call = error_call ) } } test_files_serial <- function( test_dir, test_package, test_paths, load_helpers = TRUE, reporter = default_reporter(), env = NULL, stop_on_failure = FALSE, stop_on_warning = FALSE, desc = NULL, load_package = c("none", "installed", "source"), shuffle = FALSE, error_call = caller_env() ) { # Because load_all() called by test_files_setup_env() will have already # loaded them. We don't want to rely on testthat's loading since that # only affects the test environment and we want to keep the helpers # loaded in the user's session. 
load_package <- arg_match(load_package) if (load_package == "source") { load_helpers <- FALSE } env <- test_files_setup_env(test_package, test_dir, load_package, env) # record testing env for mocks local_testing_env(env) test_files_setup_state(test_dir, test_package, load_helpers, env) reporters <- test_files_reporter(reporter, "serial", desc = desc) with_reporter( reporters$multi, lapply( test_paths, test_one_file, env = env, desc = desc, shuffle = shuffle, error_call = error_call ) ) test_files_check( reporters$list$get_results(), stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning ) } test_files_setup_env <- function( test_package, test_dir, load_package = c("none", "installed", "source"), env = NULL ) { library(testthat) load_package <- arg_match(load_package) if (load_package == "installed") { library(test_package, character.only = TRUE) } else if (load_package == "source") { # Allow configuring what we export to the search path (#1636) args <- find_load_all_args(test_dir) pkgload::load_all( test_dir, export_all = args[["export_all"]], helpers = args[["helpers"]], quiet = TRUE ) } env %||% test_env(test_package) } find_load_all_args <- function(path) { default <- list(export_all = TRUE, helpers = TRUE) desc <- find_description(path) if (is.null(desc)) { return(default) } args <- desc$get_field("Config/testthat/load-all", default = NULL) if (is.null(args)) { return(default) } args <- parse_expr(args) if (!is_call(args, "list")) { cli::cli_abort("{.field Config/testthat/load-all} must be a list.") } args <- as.list(args[-1]) list( export_all = args[["export_all"]] %||% default[["export_all"]], helpers = args[["helpers"]] %||% default[["helpers"]] ) } test_files_setup_state <- function( test_dir, test_package, load_helpers, env, frame = parent.frame() ) { # Define testing environment local_test_directory(test_dir, test_package, .env = frame) withr::local_options( topLevelEnvironment = env_parent(env), .local_envir = frame ) # Load helpers, setup, 
and teardown (on exit) local_teardown_env(frame) if (load_helpers) { source_test_helpers(".", env) } source_test_setup(".", env) withr::defer(source_test_teardown(".", env), frame) # old school } test_files_reporter <- function( reporter, mode = c("serial", "parallel"), desc = NULL, frame = caller_env() ) { mode <- arg_match(mode) # User selected reporter user <- find_reporter(reporter) # Reporter that collect test results lister <- ListReporter$new() # Snapshot reporter if (mode == "parallel") { snap_base <- MainprocessSnapshotReporter } else { snap_base <- SnapshotReporter } snap <- local_snapshotter( reporter = snap_base, fail_on_new = on_ci(), desc = desc, frame = frame ) reporters <- compact(list(user, lister, snap)) list( multi = MultiReporter$new(reporters = reporters), list = lister ) } test_files_check <- function( results, stop_on_failure = TRUE, stop_on_warning = FALSE ) { if (stop_on_failure && !all_passed(results)) { cli::cli_abort( "Test failures.", call = NULL, trace = data.frame() ) } if (stop_on_warning && any_warnings(results)) { cli::cli_abort( "Tests generated warnings.", call = NULL, trace = data.frame() ) } invisible(results) } test_one_file <- function( path, env = test_env(), desc = NULL, shuffle = FALSE, error_call = caller_env() ) { reporter <- get_reporter() withr::defer(teardown_run()) reporter$start_file(path) source_file( path, env = env(env), desc = desc, shuffle = shuffle, error_call = error_call ) reporter$end_context_if_started() reporter$end_file() } # Helpers ----------------------------------------------------------------- #' Run code after all test files #' #' This environment has no purpose other than as a handle for [withr::defer()]: #' use it when you want to run code after all tests have been run. #' Typically, you'll use `withr::defer(cleanup(), teardown_env())` #' immediately after you've made a mess in a `setup-*.R` file. 
#'
#' @export
teardown_env <- function() {
  # `the$teardown_env` is bound by local_teardown_env() at the start of a
  # test run; calling this outside a run is a programming error.
  if (is.null(the$teardown_env)) {
    cli::cli_abort(
      "{.fn teardown_env} has not been initialized.",
      .internal = TRUE
    )
  }

  the$teardown_env
}

# Bind the teardown environment to `frame` for the duration of `frame`;
# the binding is automatically restored when `frame` exits.
local_teardown_env <- function(frame = parent.frame()) {
  local_bindings(teardown_env = frame, .env = the, .frame = frame)
}

#' Find test files
#'
#' @param path path to tests
#' @param invert If `TRUE` return files which **don't** match.
#' @param ... Additional arguments passed to [grepl()] to control filtering.
#' @param start_first A character vector of file patterns (globs, see
#'   [utils::glob2rx()]). The patterns are for the file names (base names),
#'   not for the whole paths. testthat starts the files matching the
#'   first pattern first, then the ones matching the second, etc. and then
#'   the rest of the files, alphabetically. Parallel tests tend to finish
#'   quicker if you start the slowest files first. `NULL` means alphabetical
#'   order.
#' @inheritParams test_dir
#' @return A character vector of paths
#' @keywords internal
#' @export
find_test_scripts <- function(
  path,
  filter = NULL,
  invert = FALSE,
  ...,
  full.names = TRUE,
  start_first = NULL
) {
  # Test files are anything starting with "test" and ending in .r/.R;
  # they are then filtered by `filter` and reordered by `start_first`.
  files <- dir(path, "^test.*\\.[rR]$", full.names = full.names)
  files <- filter_test_scripts(files, filter, invert, ...)
  order_test_scripts(files, start_first)
}

# Keep (or, with invert = TRUE, drop) the files whose context name matches
# `filter`; NULL filter means keep everything.
filter_test_scripts <- function(files, filter = NULL, invert = FALSE, ...) {
  if (is.null(filter)) {
    return(files)
  }
  which_files <- grepl(filter, context_name(files), ...)
if (isTRUE(invert)) { which_files <- !which_files } files[which_files] } find_test_start_first <- function(path, load_package, package) { # Make sure we get the local package package if not "installed" if (load_package != "installed") { package <- NULL } desc <- find_description(path, package) if (is.null(desc)) { return(NULL) } conf <- desc$get_field("Config/testthat/start-first", NULL) if (is.null(conf)) { return(NULL) } trimws(strsplit(conf, ",")[[1]]) } order_test_scripts <- function(paths, start_first) { if (is.null(start_first)) { return(paths) } filemap <- data.frame( stringsAsFactors = FALSE, base = sub("\\.[rR]$", "", sub("^test[-_\\.]?", "", basename(paths))), orig = paths ) rxs <- utils::glob2rx(start_first) mch <- lapply(rxs, function(rx) filemap$orig[grep(rx, filemap$base)]) unique(c(unlist(mch), paths)) } testthat/R/reporter-junit.R0000644000176200001440000001451715104404205015432 0ustar liggesusers# To allow the Java-style class name format that Jenkins prefers, # "package_name_or_domain.ClassName", allow "."s in the class name. classnameOK <- function(text) { gsub("[^._A-Za-z0-9]+", "_", text) } #' Report results in jUnit XML format #' #' @description #' This reporter includes detailed results about each test and summaries, #' written to a file (or stdout) in jUnit XML format. This can be read by #' the Jenkins Continuous Integration System to report on a dashboard etc. #' Requires the _xml2_ package. #' #' To fit into the jUnit structure, `context()` becomes the `` #' name as well as the base of the ` classname`. The #' `test_that()` name becomes the rest of the ` classname`. #' The deparsed `expect_that()` call becomes the `` name. #' On failure, the message goes into the `` node message #' argument (first line only) and into its text content (full message). #' Execution time and some other details are also recorded. 
#' #' References for the jUnit XML format: #' #' #' @export #' @family reporters JunitReporter <- R6::R6Class( "JunitReporter", inherit = Reporter, public = list( results = NULL, timer = NULL, doc = NULL, errors = NULL, failures = NULL, skipped = NULL, tests = NULL, root = NULL, suite = NULL, suite_time = NULL, file_name = NULL, elapsed_time = function() { time <- (private$proctime() - self$timer)[["elapsed"]] self$timer <- private$proctime() time }, reset_suite = function() { self$errors <- 0 self$failures <- 0 self$skipped <- 0 self$tests <- 0 self$suite_time <- 0 }, start_reporter = function() { check_installed("xml2", "to use JunitReporter") self$timer <- private$proctime() self$doc <- xml2::xml_new_document() self$root <- xml2::xml_add_child(self$doc, "testsuites") self$reset_suite() }, start_file = function(file) { self$file_name <- file }, start_test = function(context, test) { if (is.null(context)) { context_start_file(self$file_name) } }, start_context = function(context) { self$suite <- xml2::xml_add_child( self$root, "testsuite", name = context, timestamp = private$timestamp(), hostname = private$hostname() ) }, end_context = function(context) { # Always uses . 
as decimal place in output regardless of options set in test withr::local_options(list(OutDec = ".")) xml2::xml_attr(self$suite, "tests") <- as.character(self$tests) xml2::xml_attr(self$suite, "skipped") <- as.character(self$skipped) xml2::xml_attr(self$suite, "failures") <- as.character(self$failures) xml2::xml_attr(self$suite, "errors") <- as.character(self$errors) #jenkins junit plugin requires time has at most 3 digits xml2::xml_attr(self$suite, "time") <- as.character(round( self$suite_time, 3 )) self$reset_suite() }, add_result = function(context, test, result) { withr::local_options(list(OutDec = ".")) self$tests <- self$tests + 1 time <- self$elapsed_time() self$suite_time <- self$suite_time + time # If no context was started (e.g., warnings outside tests), create a default one if (is.null(self$suite)) { self$start_context(context %||% "(unknown)") } # XML node for test case name <- test %||% "(unnamed)" testcase <- xml2::xml_add_child( self$suite, "testcase", time = toString(time), classname = classnameOK(context), name = classnameOK(name) ) first_line <- function(x) { loc <- expectation_location(x, " (", ")") paste0(strsplit(cli::ansi_strip(x$message), split = "\n")[[1]][1], loc) } # add an extra XML child node if not a success if (expectation_error(result)) { # "type" in Java is the exception class error <- xml2::xml_add_child( testcase, "error", type = "error", message = first_line(result) ) xml2::xml_text(error) <- cli::ansi_strip(format(result)) self$errors <- self$errors + 1 } else if (expectation_failure(result)) { # "type" in Java is the type of assertion that failed failure <- xml2::xml_add_child( testcase, "failure", type = "failure", message = first_line(result) ) xml2::xml_text(failure) <- cli::ansi_strip(format(result)) self$failures <- self$failures + 1 } else if (expectation_skip(result)) { xml2::xml_add_child(testcase, "skipped", message = first_line(result)) self$skipped <- self$skipped + 1 } else if (expectation_warning(result)) { 
warning_node <- xml2::xml_add_child(testcase, "system-out") xml2::xml_text(warning_node) <- cli::ansi_strip(format(result)) } }, end_reporter = function() { if (is.character(self$out)) { xml2::write_xml(self$doc, self$out, format = TRUE) } else if (inherits(self$out, "connection")) { file <- withr::local_tempfile() xml2::write_xml(self$doc, file, format = TRUE) cat(brio::read_file(file), file = self$out) } else { cli::cli_abort("Unsupported output type: {toString(self$out)}.") } } # end_reporter ), # public private = list( proctime = function() { proc.time() }, timestamp = function() { strftime(Sys.time(), "%Y-%m-%dT%H:%M:%SZ", tz = "UTC") }, hostname = function() { Sys.info()[["nodename"]] } ) # private ) # Fix components of JunitReporter that otherwise vary from run-to-run # # The following functions need to be mocked out to run a unit test # against static contents of reporters/junit.txt: # - proctime - originally wrapper for proc.time() # - timestamp - originally wrapper for toString(Sys.time()) # - hostname - originally wrapper for Sys.info()[["nodename"]] # JunitReporterMock <- R6::R6Class( "JunitReporterMock", inherit = JunitReporter, public = list(), private = list( proctime = function() { c(user = 0, system = 0, elapsed = 0) }, timestamp = function() { "1999:12:31 23:59:59" }, hostname = function() { "nodename" } ) ) testthat/R/expect-equality.R0000644000176200001440000001313515072252215015565 0ustar liggesusers#' Do you expect this value? #' #' @description #' These functions provide two levels of strictness when comparing a #' computation to a reference value. `expect_identical()` is the baseline; #' `expect_equal()` relaxes the test to ignore small numeric differences. #' #' In the 2nd edition, `expect_identical()` uses [identical()] and #' `expect_equal` uses [all.equal()]. In the 3rd edition, both functions use #' [waldo](https://github.com/r-lib/waldo). 
They differ only in that #' `expect_equal()` sets `tolerance = testthat_tolerance()` so that small #' floating point differences are ignored; this also implies that (e.g.) `1` #' and `1L` are treated as equal. #' #' @param object,expected Computation and value to compare it to. #' #' Both arguments supports limited unquoting to make it easier to generate #' readable failures within a function or for loop. See [quasi_label] for #' more details. #' @param ... #' **3e**: passed on to [waldo::compare()]. See its docs to see other #' ways to control comparison. #' #' **2e**: passed on to [testthat::compare()]/[identical()]. #' @param tolerance #' **3e**: passed on to [waldo::compare()]. If non-`NULL`, will #' ignore small floating point differences. It uses same algorithm as #' [all.equal()] so the tolerance is usually relative (i.e. #' `mean(abs(x - y) / mean(abs(y)) < tolerance`), except when the differences #' are very small, when it becomes absolute (i.e. `mean(abs(x - y) < tolerance`). #' See waldo documentation for more details. #' #' **2e**: passed on to [testthat::compare()], if set. It's hard to #' reason about exactly what tolerance means because depending on the precise #' code path it could be either an absolute or relative tolerance. #' @param label,expected.label Used to customise failure messages. For expert #' use only. #' @seealso #' * [expect_setequal()]/[expect_mapequal()] to test for set equality. #' * [expect_reference()] to test if two names point to same memory address. 
#' @inheritParams expect_that #' @family expectations #' @examples #' a <- 10 #' expect_equal(a, 10) #' #' # Use expect_equal() when testing for numeric equality #' \dontrun{ #' expect_identical(sqrt(2) ^ 2, 2) #' } #' expect_equal(sqrt(2) ^ 2, 2) #' @name equality-expectations NULL #' @export #' @rdname equality-expectations expect_equal <- function( object, expected, ..., tolerance = if (edition_get() >= 3) testthat_tolerance(), info = NULL, label = NULL, expected.label = NULL ) { act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) check_number_decimal(tolerance, min = 0, allow_null = TRUE) if (edition_get() >= 3) { msg <- "Expected %s to equal %s." expect_waldo_equal_(msg, act, exp, info, ..., tolerance = tolerance) } else { if (!is.null(tolerance)) { comp <- compare(act$val, exp$val, ..., tolerance = tolerance) } else { comp <- compare(act$val, exp$val, ...) } if (comp$equal) { pass() } else { msg <- c( sprintf("Expected %s to equal %s.", act$lab, exp$lab), "Differences:", comp$message ) fail(msg, info = info) } } invisible(act$val) } #' @export #' @rdname equality-expectations expect_identical <- function( object, expected, info = NULL, label = NULL, expected.label = NULL, ... ) { act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) if (edition_get() >= 3) { msg <- "Expected %s to be identical to %s." expect_waldo_equal_(msg, act, exp, info, ...) 
} else { if (identical(act$val, exp$val, ...)) { pass() } else { compare <- compare(act$val, exp$val) if (compare$equal) { msg_act <- "Objects equal but not identical" } else { msg_act <- compare$message } msg <- c( sprintf("Expected %s to be identical to %s.", act$lab, exp$lab), "Differences:", msg_act ) fail(msg, info = info) } } invisible(act$val) } expect_waldo_equal_ <- function( msg, act, exp, info = NULL, ..., trace_env = caller_env() ) { comp <- waldo_compare( act$val, exp$val, ..., x_arg = "actual", y_arg = "expected" ) if (length(comp) == 0) { pass() } else { msg <- c( sprintf(msg, act$lab, exp$lab), "Differences:", paste0(comp, "\n") ) fail(msg, info = info, trace_env = trace_env) } } #' Is an object equal to the expected value, ignoring attributes? #' #' Compares `object` and `expected` using [all.equal()] and #' `check.attributes = FALSE`. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `expect_equivalent()` is deprecated in the 3rd edition. Instead use #' `expect_equal(ignore_attr = TRUE)`. #' #' @inheritParams expect_equal #' @param ... Passed on to [compare()]. #' @keywords internal #' @export #' @examples #' #' # expect_equivalent() ignores attributes #' a <- b <- 1:3 #' names(b) <- letters[1:3] #' \dontrun{ #' expect_equal(a, b) #' } #' expect_equivalent(a, b) expect_equivalent <- function( object, expected, ..., info = NULL, label = NULL, expected.label = NULL ) { act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) edition_deprecate( 3, "expect_equivalent()", "Use expect_equal(ignore_attr = TRUE)" ) comp <- compare(act$val, exp$val, ..., check.attributes = FALSE) if (!comp$equal) { msg <- sprintf( "Expected %s to be equivalent to %s.\n%s", act$lab, exp$lab, comp$message ) fail(msg, info = info) } else { pass() } invisible(act$val) } testthat/R/expect-silent.R0000644000176200001440000000157215072252215015230 0ustar liggesusers#' Do you expect code to execute silently? 
#' #' Checks that the code produces no output, messages, or warnings. #' #' @inheritParams expect_error #' @return The first argument, invisibly. #' @family expectations #' @export #' @examples #' expect_silent("123") #' #' f <- function() { #' message("Hi!") #' warning("Hey!!") #' print("OY!!!") #' } #' \dontrun{ #' expect_silent(f()) #' } expect_silent <- function(object) { act <- quasi_capture(enquo(object), NULL, evaluate_promise) outputs <- c( if (!identical(act$cap$output, "")) "output", if (length(act$cap$warnings) > 0) "warnings", if (length(act$cap$messages) > 0) "messages" ) if (length(outputs) != 0) { fail(c( sprintf("Expected %s to run silently.", act$lab), sprintf("Actual noise: %s.", paste(outputs, collapse = ", ")) )) } else { pass() } invisible(act$cap$result) } testthat/R/expect-setequal.R0000644000176200001440000001516315104404205015550 0ustar liggesusers#' Do you expect a vector containing these values? #' #' * `expect_setequal(x, y)` tests that every element of `x` occurs in `y`, #' and that every element of `y` occurs in `x`. #' * `expect_contains(x, y)` tests that `x` contains every element of `y` #' (i.e. `y` is a subset of `x`). #' * `expect_in(x, y)` tests that every element of `x` is in `y` #' (i.e. `x` is a subset of `y`). #' * `expect_disjoint(x, y)` tests that no element of `x` is in `y` #' (i.e. `x` is disjoint from `y`). #' * `expect_mapequal(x, y)` treats lists as if they are mappings between names #' and values. Concretely, checks that `x` and `y` have the same names, then #' checks that `x[names(y)]` equals `y`. #' #' Note that `expect_setequal()` ignores names, and you will be warned if both #' `object` and `expected` have them. 
#' #' @inheritParams expect_equal #' @export #' @examples #' expect_setequal(letters, rev(letters)) #' show_failure(expect_setequal(letters[-1], rev(letters))) #' #' x <- list(b = 2, a = 1) #' expect_mapequal(x, list(a = 1, b = 2)) #' show_failure(expect_mapequal(x, list(a = 1))) #' show_failure(expect_mapequal(x, list(a = 1, b = "x"))) #' show_failure(expect_mapequal(x, list(a = 1, b = 2, c = 3))) expect_setequal <- function(object, expected) { act <- quasi_label(enquo(object)) exp <- quasi_label(enquo(expected)) check_vector(act$val, error_arg = "object") check_vector(exp$val, error_arg = "expected") if (!is.null(names(act$val)) && !is.null(names(exp$val))) { testthat_warn("expect_setequal() ignores names") } expect_setequal_("Expected %s to have the same values as %s.", act, exp) } expect_setequal_ <- function(msg, act, exp, trace_env = caller_env()) { act_miss <- unique(act$val[!act$val %in% exp$val]) exp_miss <- unique(exp$val[!exp$val %in% act$val]) if (length(exp_miss) == 0 && length(act_miss) == 0) { pass() } else { msg_exp <- sprintf(msg, act$lab, exp$lab) msg_act <- c( sprintf("Actual: %s", values(act$val)), sprintf("Expected: %s", values(exp$val)), if (length(act_miss)) sprintf("Needs: %s", values(act_miss)), if (length(exp_miss)) sprintf("Absent: %s", values(exp_miss)) ) fail(c(msg_exp, msg_act), trace_env = trace_env) } invisible(act$val) } values <- function(x) { has_extra <- length(x) > 10 if (has_extra) { x <- x[1:9] } if (is.character(x)) { x <- encodeString(x, quote = '"') } out <- paste0(x, collapse = ", ") if (has_extra) { out <- paste0(out, ", ...") } out } is_vector <- function(x) is.list(x) || (is.atomic(x) && !is.null(x)) #' @export #' @rdname expect_setequal expect_mapequal <- function(object, expected) { act <- quasi_label(enquo(object)) exp <- quasi_label(enquo(expected)) check_vector(act$val, error_arg = "object") check_map_names(act$val, error_arg = "object") check_vector(exp$val, error_arg = "expected") check_map_names(exp$val, 
error_arg = "expected") act_nms <- names(act$val) exp_nms <- names(exp$val) # Length-0 vectors are OK whether named or unnamed. if (length(act$val) == 0 && length(exp$val) == 0) { testthat_warn("`object` and `expected` are empty vectors.") pass() } else { if (!setequal(act_nms, exp_nms)) { msg <- "Expected %s to have the same names as %s." act_names <- labelled_value(names(act$val), act$lab) exp_names <- labelled_value(names(exp$val), exp$lab) expect_setequal_(msg, act_names, exp_names) } else { if (edition_get() >= 3) { act <- labelled_value(act$val[exp_nms], act$lab) msg <- "Expected %s to contain the same values as %s." expect_waldo_equal_(msg, act, exp, tolerance = testthat_tolerance()) } else { # Packages depend on 2e behaviour, but the expectation isn't written # to be reused, and we don't want to bother expect_equal(act$val[exp_nms], exp$val) } } } invisible(act$val) } #' @export #' @rdname expect_setequal expect_contains <- function(object, expected) { act <- quasi_label(enquo(object)) exp <- quasi_label(enquo(expected)) check_vector(act$val, error_arg = "object") check_vector(exp$val, error_arg = "expected") exp_miss <- !exp$val %in% act$val if (any(exp_miss)) { msg_exp <- sprintf( "Expected %s to contain all values in %s.", act$lab, exp$lab ) msg_act <- c( sprintf("Actual: %s", values(act$val)), sprintf("Expected: %s", values(exp$val)), sprintf("Missing: %s", values(exp$val[exp_miss])) ) fail(c(msg_exp, msg_act)) } else { pass() } invisible(act$val) } #' @export #' @rdname expect_setequal expect_in <- function(object, expected) { act <- quasi_label(enquo(object)) exp <- quasi_label(enquo(expected)) check_vector(act$val, error_arg = "object") check_vector(exp$val, error_arg = "expected") act_miss <- !act$val %in% exp$val if (any(act_miss)) { msg_exp <- sprintf( "Expected %s to only contain values from %s.", act$lab, exp$lab ) msg_act <- c( sprintf("Actual: %s", values(act$val)), sprintf("Expected: %s", values(exp$val)), sprintf("Invalid: %s", 
values(act$val[act_miss])) ) fail(c(msg_exp, msg_act)) } else { pass() } invisible(act$val) } #' @export #' @rdname expect_setequal expect_disjoint <- function(object, expected) { act <- quasi_label(enquo(object)) exp <- quasi_label(enquo(expected)) check_vector(act$val, error_arg = "object") check_vector(exp$val, error_arg = "expected") act_common <- act$val %in% exp$val if (any(act_common)) { fail(c( sprintf("Expected %s to be disjoint from %s.", act$lab, exp$lab), sprintf("Actual: %s", values(act$val)), sprintf("Expected: None of %s", values(exp$val)), sprintf("Invalid: %s", values(act$val[act_common])) )) } else { pass() } invisible(act$val) } # Helpers ---------------------------------------------------------------------- check_vector <- function(x, error_arg, error_call = caller_env()) { if (!is_vector(x)) { stop_input_type(x, "a vector", arg = error_arg, call = error_call) } } check_map_names <- function(x, error_arg, error_call = caller_env()) { nms <- names2(x) if (anyDuplicated(nms)) { dups <- unique(nms[duplicated(nms)]) cli::cli_abort( c( "All elements in {.arg {error_arg}} must have unique names.", x = "Duplicate names: {.str {dups}}" ), call = error_call ) } if (any(nms == "")) { empty <- which(nms == "") cli::cli_abort( c( "All elements in {.arg {error_arg}} must have names.", x = "Empty names at position{?s}: {empty}" ), call = error_call ) } } testthat/R/test-compiled-code.R0000644000176200001440000003260615072252215016127 0ustar liggesusers#' @keywords internal #' @rdname run_cpp_tests #' @export expect_cpp_tests_pass <- function(package) { check_string(package) run_testthat_tests <- get_routine(package, "run_testthat_tests") output <- "" tests_passed <- TRUE tryCatch( output <- capture_output_lines( tests_passed <- .Call(run_testthat_tests, FALSE) ), error = function(e) { cli::cli_warn( "Failed to call test entrypoint {.fn {run_testthat_tests}}." 
) } ) # Drop first line of output (it's jut a '####' delimiter) info <- paste(output[-1], collapse = "\n") if (!tests_passed) { fail(paste("C++ unit tests:", info, sep = "\n")) } else { pass() } } #' Do C++ tests past? #' #' Test compiled code in the package `package`. A call to this function will #' automatically be generated for you in `tests/testthat/test-cpp.R` after #' calling [use_catch()]; you should not need to manually call this expectation #' yourself. #' #' @param package The name of the package to test. #' @keywords internal #' @export run_cpp_tests <- function(package) { check_string(package) skip_on_os("solaris") check_installed("xml2", "to run run_cpp_tests()") run_testthat_tests <- get_routine(package, "run_testthat_tests") output <- "" tests_passed <- TRUE catch_error <- FALSE tryCatch( { output <- capture_output_lines( tests_passed <- .Call(run_testthat_tests, TRUE) ) }, error = function(e) { catch_error <- TRUE reporter <- get_reporter() context_start("Catch") reporter$start_test(context = "Catch", test = "Catch") reporter$add_result( context = "Catch", test = "Catch", result = new_expectation("failure", e$message) ) reporter$end_test(context = "Catch", test = "Catch") } ) if (catch_error) { return() } report <- xml2::read_xml(paste(output, collapse = "\n")) contexts <- xml2::xml_find_all(report, "//TestCase") for (context in contexts) { context_name <- sub(" [|][^|]+$", "", xml2::xml_attr(context, "name")) context_start(context_name) tests <- xml2::xml_find_all(context, "./Section") for (test in tests) { test_name <- xml2::xml_attr(test, "name") result <- xml2::xml_find_first(test, "./OverallResults") successes <- as.integer(xml2::xml_attr(result, "successes")) get_reporter()$start_test(context = context_name, test = test_name) for (i in seq_len(successes)) { exp <- new_expectation("success", "") exp$test <- test_name get_reporter()$add_result( context = context_name, test = test_name, result = exp ) } failures <- xml2::xml_find_all(test, 
"./Expression") for (failure in failures) { org <- xml2::xml_find_first(failure, "Original") org_text <- xml2::xml_text(org, trim = TRUE) filename <- xml2::xml_attr(failure, "filename") type <- xml2::xml_attr(failure, "type") type_msg <- switch( type, "CATCH_CHECK_FALSE" = "isn't false.", "CATCH_CHECK_THROWS" = "did not throw an exception.", "CATCH_CHECK_THROWS_AS" = "threw an exception with unexpected type.", "isn't true." ) org_text <- paste(org_text, type_msg) line <- xml2::xml_attr(failure, "line") failure_srcref <- srcref( srcfile(file.path("src", filename)), c(line, line, 1, 1) ) exp <- new_expectation("failure", org_text, srcref = failure_srcref) exp$test <- test_name get_reporter()$add_result( context = context_name, test = test_name, result = exp ) } exceptions <- xml2::xml_find_all(test, "./Exception") for (exception in exceptions) { exception_text <- xml2::xml_text(exception, trim = TRUE) filename <- xml2::xml_attr(exception, "filename") line <- xml2::xml_attr(exception, "line") exception_srcref <- srcref( srcfile(file.path("src", filename)), c(line, line, 1, 1) ) exp <- new_expectation( "error", exception_text, srcref = exception_srcref ) exp$test <- test_name get_reporter()$add_result( context = context_name, test = test_name, result = exp ) } get_reporter()$end_test(context = context_name, test = test_name) } } } #' Use Catch for C++ unit testing #' #' Add the necessary infrastructure to enable C++ unit testing #' in \R packages with [Catch](https://github.com/catchorg/Catch2) and #' `testthat`. #' #' Calling `use_catch()` will: #' #' 1. Create a file `src/test-runner.cpp`, which ensures that the #' `testthat` package will understand how to run your package's #' unit tests, #' #' 2. Create an example test file `src/test-example.cpp`, which #' showcases how you might use Catch to write a unit test, #' #' 3. 
Add a test file `tests/testthat/test-cpp.R`, which ensures that #' `testthat` will run your compiled tests during invocations of #' `devtools::test()` or `R CMD check`, and #' #' 4. Create a file `R/catch-routine-registration.R`, which ensures that #' \R will automatically register this routine when #' `tools::package_native_routine_registration_skeleton()` is invoked. #' #' You will also need to: #' #' * Add xml2 to Suggests, with e.g. `usethis::use_package("xml2", "Suggests")` #' * Add testthat to LinkingTo, with e.g. #' `usethis::use_package("testthat", "LinkingTo")` #' #' C++ unit tests can be added to C++ source files within the #' `src` directory of your package, with a format similar #' to \R code tested with `testthat`. Here's a simple example #' of a unit test written with `testthat` + Catch: #' #' \preformatted{ #' context("C++ Unit Test") { #' test_that("two plus two is four") { #' int result = 2 + 2; #' expect_true(result == 4); #' } #' } #' } #' #' When your package is compiled, unit tests alongside a harness #' for running these tests will be compiled into your \R package, #' with the C entry point `run_testthat_tests()`. `testthat` #' will use that entry point to run your unit tests when detected. #' #' @section Functions: #' #' All of the functions provided by Catch are #' available with the `CATCH_` prefix -- see #' [here](https://github.com/catchorg/Catch2/blob/master/docs/assertions.md) #' for a full list. `testthat` provides the #' following wrappers, to conform with `testthat`'s #' \R interface: #' #' \tabular{lll}{ #' \strong{Function} \tab \strong{Catch} \tab \strong{Description} \cr #' `context` \tab `CATCH_TEST_CASE` \tab The context of a set of tests. \cr #' `test_that` \tab `CATCH_SECTION` \tab A test section. \cr #' `expect_true` \tab `CATCH_CHECK` \tab Test that an expression evaluates to `TRUE`. \cr #' `expect_false` \tab `CATCH_CHECK_FALSE` \tab Test that an expression evaluates to `FALSE`. 
\cr #' `expect_error` \tab `CATCH_CHECK_THROWS` \tab Test that evaluation of an expression throws an exception. \cr #' `expect_error_as` \tab `CATCH_CHECK_THROWS_AS` \tab Test that evaluation of an expression throws an exception of a specific class. \cr #' } #' #' In general, you should prefer using the `testthat` #' wrappers, as `testthat` also does some work to #' ensure that any unit tests within will not be compiled or #' run when using the Solaris Studio compilers (as these are #' currently unsupported by Catch). This should make it #' easier to submit packages to CRAN that use Catch. #' #' @section Symbol Registration: #' #' If you've opted to disable dynamic symbol lookup in your #' package, then you'll need to explicitly export a symbol #' in your package that `testthat` can use to run your unit #' tests. `testthat` will look for a routine with one of the names: #' #' \preformatted{ #' C_run_testthat_tests #' c_run_testthat_tests #' run_testthat_tests #' } #' #' Assuming you have `useDynLib(, .registration = TRUE)` in your package's #' `NAMESPACE` file, this implies having routine registration code of the form: #' #' ``` #' // The definition for this function comes from the file 'src/test-runner.cpp', #' // which is generated via `testthat::use_catch()`. #' extern SEXP run_testthat_tests(); #' #' static const R_CallMethodDef callMethods[] = { #' // other .Call method definitions, #' {"run_testthat_tests", (DL_FUNC) &run_testthat_tests, 0}, #' {NULL, NULL, 0} #' }; #' #' void R_init_(DllInfo* dllInfo) { #' R_registerRoutines(dllInfo, NULL, callMethods, NULL, NULL); #' R_useDynamicSymbols(dllInfo, FALSE); #' } #' ``` #' #' replacing `` above with the name of your package, as appropriate. 
#' #' See [Controlling Visibility](https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Controlling-visibility) #' and [Registering Symbols](https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-symbols) #' in the **Writing R Extensions** manual for more information. #' #' @section Advanced Usage: #' #' If you'd like to write your own Catch test runner, you can #' instead use the `testthat::catchSession()` object in a file #' with the form: #' #' \preformatted{ #' #define TESTTHAT_TEST_RUNNER #' #include #' #' void run() #' { #' Catch::Session& session = testthat::catchSession(); #' // interact with the session object as desired #' } #' } #' #' This can be useful if you'd like to run your unit tests #' with custom arguments passed to the Catch session. #' #' @param dir The directory containing an \R package. #' #' @section Standalone Usage: #' #' If you'd like to use the C++ unit testing facilities provided #' by Catch, but would prefer not to use the regular `testthat` #' \R testing infrastructure, you can manually run the unit tests #' by inserting a call to: #' #' \preformatted{ #' .Call("run_testthat_tests", PACKAGE = ) #' } #' #' as necessary within your unit test suite. #' #' @export #' @seealso [Catch](https://github.com/catchorg/Catch2/blob/master/docs/assertions.md), #' the library used to enable C++ unit testing. use_catch <- function(dir = getwd()) { desc_path <- file.path(dir, "DESCRIPTION") if (!file.exists(desc_path)) { cli::cli_abort("No DESCRIPTION file at path {.path {desc_path}}.") } desc <- read.dcf(desc_path, all = TRUE) pkg <- desc$Package if (!nzchar(pkg)) { cli::cli_abort( "No {.field Package} field in DESCRIPTION file {.path {desc_path}}." ) } src_dir <- file.path(dir, "src") if (!file.exists(src_dir) && !dir.create(src_dir)) { cli::cli_abort("Failed to create {.path src/} directory {.path {src_dir}}.") } test_runner_path <- file.path(src_dir, "test-runner.cpp") # Copy the test runner. 
success <- file.copy( system.file(package = "testthat", "resources", "test-runner.cpp"), test_runner_path, overwrite = TRUE ) if (!success) { cli::cli_abort( "Failed to copy {.file test-runner.cpp} to {.path {src_dir}}." ) } # Copy the test example. success <- file.copy( system.file(package = "testthat", "resources", "test-example.cpp"), file.path(src_dir, "test-example.cpp"), overwrite = TRUE ) if (!success) { cli::cli_abort( "Failed to copy {.file test-example.cpp} to {.path {src_dir}}." ) } # Copy the 'test-cpp.R' file. test_dir <- file.path(dir, "tests", "testthat") if (!file.exists(test_dir) && !dir.create(test_dir, recursive = TRUE)) { cli::cli_abort( "Failed to create {.path tests/testthat/} directory {.path {test_dir}}." ) } template_file <- system.file(package = "testthat", "resources", "test-cpp.R") contents <- readChar(template_file, file.info(template_file)$size, TRUE) transformed <- sprintf(contents, pkg) output_path <- file.path(test_dir, "test-cpp.R") cat(transformed, file = output_path) # Copy the 'test-runner.R file. template_file <- system.file( package = "testthat", "resources", "catch-routine-registration.R" ) contents <- readChar(template_file, file.info(template_file)$size, TRUE) transformed <- sprintf(contents, pkg) output_path <- file.path(dir, "R", "catch-routine-registration.R") cat(transformed, file = output_path) cli::cli_inform(c( v = "Added C++ unit testing infrastructure.", i = "Please ensure you have {.field LinkingTo: testthat} in your DESCRIPTION.", i = "Please ensure you have {.field Suggests: xml2} in your DESCRIPTION." 
)) } get_routine <- function(package, routine) { # check to see if the package has explicitly exported # the associated routine (check common prefixes as we # don't necessarily have access to the NAMESPACE and # know what the prefix is) namespace <- asNamespace(package) prefixes <- c("C_", "c_", "C", "c", "_", "") for (prefix in prefixes) { name <- paste(prefix, routine, sep = "") if (exists(name, envir = namespace)) { symbol <- get(name, envir = namespace) if (inherits(symbol, "NativeSymbolInfo")) { return(symbol) } } } # otherwise, try to resolve the symbol dynamically for (prefix in prefixes) { name <- paste(prefix, routine, sep = "") resolved <- tryCatch( getNativeSymbolInfo(routine, PACKAGE = package), error = function(e) NULL ) if (inherits(resolved, "NativeSymbolInfo")) { return(resolved) } } # if we got here, we failed to find the symbol -- throw an error cli::cli_abort( "Failed to locate routine {.code {routine}} in package {.pkg {package}}." ) } (function() { .Call(run_testthat_tests, TRUE) }) testthat/R/snapshot-reporter.R0000644000176200001440000001613015127731656016153 0ustar liggesusersSnapshotReporter <- R6::R6Class( "SnapshotReporter", inherit = Reporter, public = list( snap_dir = character(), file = NULL, test = NULL, test_file_seen = character(), snap_file_seen = character(), snap_file_saved = character(), variants_changed = FALSE, fail_on_new = NULL, desc = NULL, old_snaps = NULL, cur_snaps = NULL, new_snaps = NULL, initialize = function( snap_dir = "_snaps", fail_on_new = NULL, desc = NULL ) { self$snap_dir <- normalizePath(snap_dir, mustWork = FALSE) self$fail_on_new <- fail_on_new self$desc <- desc }, start_file = function(path, test = NULL) { self$file <- context_name(path) self$test_file_seen <- c(self$test_file_seen, self$file) self$snap_file_saved <- character() self$variants_changed <- character() self$old_snaps <- FileSnaps$new(self$snap_dir, self$file, type = "old") self$cur_snaps <- FileSnaps$new(self$snap_dir, self$file, type = "cur") 
self$new_snaps <- FileSnaps$new(self$snap_dir, self$file, type = "new") if (!is.null(self$desc)) { # When filtering tests, we need to copy over all of the old snapshots, # apart from the one that matches the test snaps <- self$old_snaps$snaps test_name <- test_description(self$desc) for (variant in names(snaps)) { # In the case of subtests, snaps are named a / b / c1, a / b / c2 etc. # So if we run a / b, we want to remove a / b, a / b / c, a / b / c2 # Subtests that use / in their names are not currently supported. matches <- startsWith(names(snaps[[variant]]), test_name) # Can't just remove because we want to preserve order snaps[[variant]][matches] <- rep(list(NULL), sum(matches)) } self$cur_snaps$snaps <- snaps } if (!is.null(test)) { self$start_test(NULL, test) } }, start_test = function(context, test) { if (is.null(test)) { return() } self$test <- gsub("\n", "", test) }, # Called by expectation take_snapshot = function( value, save = identity, load = identity, ..., tolerance = testthat_tolerance(), variant = NULL, trace_env = caller_env() ) { check_string(self$test, allow_empty = FALSE) i <- self$new_snaps$append(self$test, variant, save(value)) old_raw <- self$old_snaps$get(self$test, variant, i) if (!is.null(old_raw)) { self$cur_snaps$append(self$test, variant, old_raw) old <- load(old_raw) comp <- waldo_compare( x = old, x_arg = "old", y = value, y_arg = "new", ..., tolerance = tolerance, quote_strings = FALSE ) if (length(comp) > 0L) { self$variants_changed <- union(self$variants_changed, variant) } else { # Use the old value for the new snapshot so the snapshot remains # unchanged if the values compare as equal self$new_snaps$set(self$test, variant, i, old_raw) } comp } else { value_enc <- save(value) self$cur_snaps$append(self$test, variant, value_enc) fail_on_new <- self$fail_on_new %||% on_ci() message <- paste0( "Adding new snapshot", if (variant != "_default") paste0(" for variant '", variant, "'"), ":\n", value_enc ) if (fail_on_new) { 
fail(message, trace_env = trace_env) return() } testthat_warn(message) character() } }, take_file_snapshot = function( name, path, file_equal, variant = NULL, trace_env = caller_env() ) { self$announce_file_snapshot(name) save_path <- paste0(c(self$file, variant, name), collapse = "/") if (save_path %in% self$snap_file_saved) { cli::cli_abort( "Snapshot file names must be unique. {.val {name}} has already been used.", call = trace_env ) } self$snap_file_saved <- c(self$snap_file_saved, save_path) snapshot_file_equal( snap_dir = self$snap_dir, snap_test = self$file, snap_name = name, snap_variant = variant, path = path, file_equal = file_equal, fail_on_new = self$fail_on_new, trace_env = trace_env ) }, # Also called from announce_snapshot_file() announce_file_snapshot = function(name) { self$snap_file_seen <- c(self$snap_file_seen, file.path(self$file, name)) }, add_result = function(context, test, result) { if (is.null(self$test)) { return() } # If expectation errors or skips, need to copy snapshots from old to cur # TODO: the logic is not correct here for subtests, probably because # the code was not written under the assumption that start_test() # generates a stack of tests. You can see the problem by running # local_on_cran() then testing describe.R. 
if (expectation_error(result) || expectation_skip(result)) { self$cur_snaps$reset(self$test, self$old_snaps) } }, end_file = function() { dir.create(self$snap_dir, showWarnings = FALSE) self$cur_snaps$write() for (variant in self$new_snaps$variants()) { if (variant %in% self$variants_changed) { self$new_snaps$write(variant) } else { self$new_snaps$delete(variant) } } }, end_reporter = function() { # clean up if we've seen all files tests <- context_name(find_test_scripts(".", full.names = FALSE)) if (!on_ci() && all(tests %in% self$test_file_seen)) { snapshot_cleanup( self$snap_dir, test_files_seen = self$test_file_seen, snap_files_seen = self$snap_file_seen ) } }, is_active = function() { !is.null(self$file) && !is.null(self$test) }, snap_files = function() { dir(self$snap_dir, recursive = TRUE) } ) ) # set/get active snapshot reporter ---------------------------------------- get_snapshotter <- function() { x <- getOption("testthat.snapshotter") if (is.null(x)) { return() } if (!x$is_active()) { return() } x } #' Instantiate local snapshotting context #' #' Needed if you want to run snapshot tests outside of the usual testthat #' framework For expert use only. #' #' @export #' @keywords internal local_snapshotter <- function( snap_dir = "_snaps", reporter = SnapshotReporter, cleanup = FALSE, desc = NULL, fail_on_new = NULL, frame = caller_env() ) { reporter <- reporter$new( snap_dir = snap_dir, fail_on_new = fail_on_new, desc = desc ) withr::local_options("testthat.snapshotter" = reporter, .local_envir = frame) reporter } local_test_snapshotter <- function( snap_dir = NULL, desc = NULL, frame = caller_env() ) { snap_dir <- snap_dir %||% withr::local_tempdir(.local_envir = frame) local_snapshotter( snap_dir = snap_dir, desc = desc, fail_on_new = FALSE, frame = frame ) } testthat/R/watcher.R0000644000176200001440000000611715072252215014101 0ustar liggesusers#' Watch a directory for changes (additions, deletions & modifications). 
#' #' This is used to power the [auto_test()] and #' [auto_test_package()] functions which are used to rerun tests #' whenever source code changes. #' #' Use Ctrl + break (windows), Esc (mac gui) or Ctrl + C (command line) to #' stop the watcher. #' #' @param path character vector of paths to watch. Omit trailing backslash. #' @param pattern file pattern passed to [dir()] #' @param callback function called every time a change occurs. It should #' have three parameters: added, deleted, modified, and should return #' `TRUE` to keep watching, or `FALSE` to stop. #' @param hash hashes are more accurate at detecting changes, but are slower #' for large files. When `FALSE`, uses modification time stamps #' @export #' @keywords internal watch <- function(path, callback, pattern = NULL, hash = TRUE) { prev <- dir_state(path, pattern, hash = hash) repeat { Sys.sleep(1) curr <- dir_state(path, pattern, hash = hash) changes <- compare_state(prev, curr) if (changes$n > 0) { # cat("C") keep_going <- TRUE try( keep_going <- callback(changes$added, changes$deleted, changes$modified) ) if (!isTRUE(keep_going)) return(invisible()) } else { # cat(".") } prev <- curr } } safe_digest <- function(path) { if (!file.exists(path)) { return(NA_character_) } if (is_directory(path)) { return(NA_character_) } if (!is_readable(path)) { return(NA_character_) } rlang::hash_file(path) } #' Capture the state of a directory. #' #' @param path path to directory #' @param pattern regular expression with which to filter files #' @param hash use hash (slow but accurate) or time stamp (fast but less #' accurate) #' @keywords internal dir_state <- function(path, pattern = NULL, hash = TRUE) { files <- dir(path, pattern, full.names = TRUE) # It's possible for any of the files to be deleted between the dir() # call above and the calls below; `file.info` handles this # gracefully, but digest::digest doesn't -- so we wrap it. Both # cases will return NA for files that have gone missing. 
if (hash) { file_states <- map_chr(files, safe_digest) } else { file_states <- file.info(files)$mtime } file_states <- stats::setNames(file_states, files) file_states[!is.na(file_states)] } #' Compare two directory states. #' #' @param old previous state #' @param new current state #' @return list containing number of changes and files which have been #' `added`, `deleted` and `modified` #' @keywords internal compare_state <- function(old, new) { added <- setdiff(names(new), names(old)) deleted <- setdiff(names(old), names(new)) same <- intersect(names(old), names(new)) modified <- names(new[same])[new[same] != old[same]] n <- length(added) + length(deleted) + length(modified) list(n = n, added = added, deleted = deleted, modified = modified) } # Helpers ----------------------------------------------------------------- is_directory <- function(x) file.info(x)$isdir is_readable <- function(x) file.access(x, 4) == 0 testthat/R/extract.R0000644000176200001440000001262215104642041014110 0ustar liggesusers#' Extract a reprex from a failed expectation #' #' @description #' `extract_test()` creates a minimal reprex for a failed expectation. #' It extracts all non-test code before the failed expectation as well as #' all code inside the test up to and including the failed expectation. #' #' This is particularly useful when you're debugging test failures in #' someone else's package. #' #' @param location A string giving the location in the form #' `FILE:LINE[:COLUMN]`. #' @param path Path to write the reprex to. Defaults to `stdout()`. #' @param package If supplied, will be used to construct a test environment #' for the extracted code. #' @return This function is called for its side effect of rendering a #' reprex to `path`. This function will never error: if extraction #' fails, the error message will be written to `path`. 
#' @export #' @examples #' # If you see a test failure like this: #' # -- Failure (test-extract.R:46:3): errors if can't find test ------------- #' # Expected FALSE to be TRUE. #' # Differences: #' # `actual`: FALSE #' # `expected`: TRUE #' #' # You can run this: #' \dontrun{extract_test("test-extract.R:46:3")} #' # to see just the code needed to reproduce the failure extract_test <- function( location, path = stdout(), package = Sys.getenv("TESTTHAT_PKG") ) { check_string(location) check_string(package) pieces <- strsplit(location, ":")[[1]] if (!length(pieces) %in% c(2, 3)) { cli::cli_abort(c( "Expected {.arg location} to be of the form FILE:LINE[:COLUMN]", i = "Got {.arg location}: {.val {location}}" )) } test_path <- test_path(pieces[[1]]) line <- as.integer(pieces[2]) lines <- extract_test_(test_path, line, package) base::writeLines(lines, con = path) } #' Simulate a test environment #' #' This function is designed to allow you to simulate testthat's testing #' environment in an interactive session. To undo it's affect, you #' will need to restart your R session. #' #' @keywords internal #' @param package Name of installed package. #' @param path Path to `tests/testthat`. 
#' @export #' @rdname topic-name simulate_test_env <- function(package, path) { check_string(package) check_string(path) env <- test_env(package) source_test_helpers(path, env = env) source_test_setup(path, env = env) invisible(env) } extract_test_ <- function( test_path, line, package = Sys.getenv("TESTTHAT_PKG") ) { source <- paste0("# Extracted from ", test_path, ":", line) exprs <- parse_file(test_path) lines <- tryCatch( extract_test_lines(exprs, line, package), error = function(cnd) { lines <- strsplit(conditionMessage(cnd), "\n")[[1]] lines <- c("", "Failed to extract test: ", lines) paste0("# ", lines) } ) lines <- c(source, "", lines) lines } save_test <- function(srcref, dir, package = Sys.getenv("TESTTHAT_PKG")) { if (env_var_is_false("TESTTHAT_PROBLEMS")) { return() } test_path <- utils::getSrcFilename(srcref, full.names = TRUE) if (is.null(test_path) || !file.exists(test_path)) { return() } line <- srcref[[3]] extracted <- extract_test_(test_path, line, package) test_name <- tools::file_path_sans_ext(basename(test_path)) dir_create(dir) problems_path <- file.path(dir, paste0(test_name, "-", line, ".R")) cat("Saving ", problems_path, "\n", sep = "") writeLines(extracted, problems_path) invisible(problems_path) } extract_test_lines <- function( exprs, line, package = "", error_call = caller_env() ) { check_number_whole(line, min = 1, call = error_call) srcrefs <- attr(exprs, "srcref") is_subtest <- map_lgl(exprs, is_subtest) # First we find the test is_test <- is_subtest & start_line(srcrefs) <= line & end_line(srcrefs) >= line if (!any(is_test)) { cli::cli_abort("Failed to find test at line {line}.", call = error_call) } call <- exprs[[which(is_test)[[1]]]] test_contents <- attr(call[[3]], "srcref")[-1] # drop `{` keep <- start_line(test_contents) <= line lines <- c(header("test"), srcref_to_character(test_contents[keep])) # We first find the prequel, all non-test code before the test is_prequel <- !is_subtest & start_line(srcrefs) < line if 
(any(is_prequel)) { lines <- c( header("prequel"), srcref_to_character(srcrefs[is_prequel]), "", lines ) } if (package != "") { lines <- c( header("setup"), "library(testthat)", sprintf( 'test_env <- simulate_test_env(package = "%s", path = "..")', package ), "attach(test_env, warn.conflicts = FALSE)", "", lines ) } lines } # Helpers --------------------------------------------------------------------- parse_file <- function(path, error_call = caller_env()) { check_string(path, call = error_call) if (!file.exists(path)) { cli::cli_abort( "{.arg path} ({.path {path}}) does not exist.", call = error_call ) } parse(path, keep.source = TRUE) } parse_text <- function(text) { text <- sub("^\n", "", text) indent <- regmatches(text, regexpr("^ *", text)) text <- gsub(paste0("(?m)^", indent), "", text, perl = TRUE) parse(text = text, keep.source = TRUE) } srcref_to_character <- function(x) { unlist(map(x, as.character)) } start_line <- function(srcrefs) { map_int(srcrefs, \(x) x[[1]]) } end_line <- function(srcrefs) { map_int(srcrefs, \(x) x[[3]]) } header <- function(x) { paste0("# ", x, " ", strrep("-", 80 - nchar(x) - 3)) } testthat/R/make-expectation.R0000644000176200001440000000137513201340454015676 0ustar liggesusers#' Make an equality test. #' #' This a convenience function to make a expectation that checks that #' input stays the same. 
#' #' @param x a vector of values #' @param expectation the type of equality you want to test for #' (`"equals"`, `"is_equivalent_to"`, `"is_identical_to"`) #' @export #' @keywords internal #' @examples #' x <- 1:10 #' make_expectation(x) #' #' make_expectation(mtcars$mpg) #' #' df <- data.frame(x = 2) #' make_expectation(df) make_expectation <- function(x, expectation = "equals") { obj <- substitute(x) expectation <- match.arg( expectation, c("equals", "is_equivalent_to", "is_identical_to") ) dput(substitute( expect_equal(obj, values), list(obj = obj, expectation = as.name(expectation), values = x) )) } testthat/R/snapshot-reporter-parallel.R0000644000176200001440000000325015053661631017735 0ustar liggesusersMainprocessSnapshotReporter <- R6::R6Class( "MainprocessSnapshotReporter", inherit = SnapshotReporter, public = list( end_file = function() { # No thing to do, this is done in the subprocess } ) ) SubprocessSnapshotReporter <- R6::R6Class( "SubprocessSnapshotReporter", inherit = SnapshotReporter, public = list( start_file = function(path, test = NULL) { private$filename <- path private$test <- test super$start_file(path, test) }, end_file = function() { private$filename <- NULL super$end_file() }, end_context = function(context) { private$context <- NULL super$end_context() }, end_test = function(context, test) { private$context <- NULL private$test <- NULL super$end_test(context, test) }, start_test = function(context, test) { private$context <- context private$test <- test super$start_test(context, test) }, announce_file_snapshot = function(name) { private$event("announce_file_snapshot", name) super$announce_file_snapshot(name) }, end_reporter = function() { # do not delete unused snapshots, that's up to the main process } ), private = list( filename = NULL, context = NULL, test = NULL, event = function(cmd, ...) 
{ msg <- list( code = PROCESS_MSG, type = "snapshotter", cmd = cmd, filename = private$filename, context = private$context, test = private$test, time = NULL, args = list(...) ) class(msg) <- c("testthat_message", "callr_message", "condition") signalCondition(msg) } ) ) testthat/R/parallel-config.R0000644000176200001440000000172215047715224015506 0ustar liggesusersfind_parallel <- function(path, load_package = "source", package = NULL) { # If env var is set, then use that parenv <- Sys.getenv("TESTTHAT_PARALLEL", NA_character_) if (!is.na(parenv)) { if (toupper(parenv) == "TRUE") { return(TRUE) } if (toupper(parenv) == "FALSE") { return(FALSE) } cli::cli_abort( "{.envvar TESTTHAT_PARALLEL} must be {.code TRUE} or {.code FALSE}." ) } # Make sure we get the local package package if not "installed" if (load_package != "installed") { package <- NULL } desc <- find_description(path, package) if (is.null(desc)) { return(FALSE) } par <- identical( toupper(desc$get_field("Config/testthat/parallel", default = "FALSE")), "TRUE" ) if (par) { ed <- as.integer(desc$get_field("Config/testthat/edition", default = 2L)) if (ed < 3) { cli::cli_inform("Running tests in parallel requires the 3rd edition.") par <- FALSE } } par } testthat/R/expect-match.R0000644000176200001440000001101315103417311015010 0ustar liggesusers#' Do you expect a string to match this pattern? #' #' @details #' `expect_match()` checks if a character vector matches a regular expression, #' powered by [grepl()]. #' #' `expect_no_match()` provides the complementary case, checking that a #' character vector *does not* match a regular expression. #' #' @inheritParams expect_that #' @inheritParams base::grepl #' @param regexp Regular expression to test against. #' @param all Should all elements of actual value match `regexp` (TRUE), #' or does only one need to match (FALSE). #' @param fixed If `TRUE`, treats `regexp` as a string to be matched exactly #' (not a regular expressions). Overrides `perl`. 
#' @inheritDotParams base::grepl -pattern -x -perl -fixed #' @family expectations #' @export #' @examples #' expect_match("Testing is fun", "fun") #' expect_match("Testing is fun", "f.n") #' expect_no_match("Testing is fun", "horrible") #' #' show_failure(expect_match("Testing is fun", "horrible")) #' show_failure(expect_match("Testing is fun", "horrible", fixed = TRUE)) #' #' # Zero-length inputs always fail #' show_failure(expect_match(character(), ".")) expect_match <- function( object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL ) { act <- quasi_label(enquo(object), label) check_character(object) check_string(regexp) check_bool(perl) check_bool(fixed) check_bool(all) if (length(object) == 0) { fail( sprintf("Expected %s to have at least one element.", act$lab), info = info ) } else { expect_match_( act = act, regexp = regexp, perl = perl, fixed = fixed, ..., all = all, info = info, label = label, negate = FALSE ) } invisible(act$val) } #' @describeIn expect_match Check that a string doesn't match a regular #' expression. #' @export expect_no_match <- function( object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL ) { # Capture here to avoid environment-related messiness act <- quasi_label(enquo(object), label) check_character(object) check_string(regexp) check_bool(perl) check_bool(fixed) check_bool(all) expect_match_( act = act, regexp = regexp, perl = perl, fixed = fixed, ..., all = all, info = info, label = label, negate = TRUE ) invisible(act$val) } expect_match_ <- function( act, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL, negate = FALSE, title = "text", trace_env = caller_env() ) { matches <- grepl(regexp, act$val, perl = perl, fixed = fixed, ...) 
condition <- if (negate) !matches else matches ok <- if (all) all(condition) else any(condition) if (!ok) { values <- show_text(act$val, condition) if (length(act$val) == 1) { which <- "" } else { which <- if (all) "every element of " else "some element of " } match <- if (negate) "not to match" else "to match" msg_exp <- sprintf( "Expected %s%s %s %s %s.", which, act$lab, match, if (fixed) "string" else "regexp", encodeString(regexp, quote = '"') ) msg_act <- c(paste0("Actual ", title, ':'), values) fail(c(msg_exp, msg_act), info = info, trace_env = trace_env) } else { pass() } } # Adapted from print.ellmer_prompt show_text <- function(x, matches = NULL, max_items = 20, max_lines = NULL) { matches <- matches %||% rep(TRUE, length(x)) max_lines <- max_lines %||% (max_items * 25) n <- length(x) n_extra <- length(x) - max_items if (n_extra > 0) { x <- x[seq_len(max_items)] matches <- matches[seq_len(max_items)] } if (length(x) == 0) { return(character()) } bar <- if (cli::is_utf8_output()) "\u2502" else "|" id <- ifelse( matches, cli::col_green(cli::symbol$tick), cli::col_red(cli::symbol$cross) ) indent <- paste0(id, " ", bar, " ") exdent <- paste0(" ", cli::col_grey(bar), " ") x[is.na(x)] <- cli::col_red("") x <- paste0(indent, x) x <- gsub("\n", paste0("\n", exdent), x) lines <- strsplit(x, "\n") ids <- rep(seq_along(x), lengths(lines)) first <- c(TRUE, ids[-length(ids)] != ids[-1]) lines <- unlist(lines) if (length(lines) > max_lines) { if (first[max_lines + 1]) { max_lines <- max_lines - 1 } lines <- lines[seq_len(max_lines)] lines <- c(lines, paste0(exdent, "...")) n_extra <- n - ids[max_lines - 1] } if (n_extra > 0) { lines <- c(lines, paste0("... and ", n_extra, " more.\n")) } lines } testthat/R/capture-output.R0000644000176200001440000000411215040747537015451 0ustar liggesusers#' Capture output to console #' #' Evaluates `code` in a special context in which all output is captured, #' similar to [capture.output()]. 
#' #' Results are printed using the `testthat_print()` generic, which defaults #' to `print()`, giving you the ability to customise the printing of your #' object in tests, if needed. #' #' @param code Code to evaluate. #' @param print If `TRUE` and the result of evaluating `code` is #' visible, print the result using `testthat_print()`. #' @param width Number of characters per line of output. This does not #' inherit from `getOption("width")` so that tests always use the same #' output width, minimising spurious differences. #' @return `capture_output()` returns a single string. `capture_output_lines()` #' returns a character vector with one entry for each line #' @keywords internal #' @export #' @examples #' capture_output({ #' cat("Hi!\n") #' cat("Bye\n") #' }) #' #' capture_output_lines({ #' cat("Hi!\n") #' cat("Bye\n") #' }) #' #' capture_output("Hi") #' capture_output("Hi", print = TRUE) capture_output <- function(code, print = FALSE, width = 80) { output <- capture_output_lines(code, print, width = width) paste0(output, collapse = "\n") } #' @export #' @rdname capture_output capture_output_lines <- function(code, print = FALSE, width = 80) { eval_with_output(code, print = print, width = width)$out } eval_with_output <- function(code, print = FALSE, width = 80) { path <- withr::local_tempfile() if (!is.null(width)) { local_width(width) } result <- withr::with_output_sink(path, withVisible(code)) if (result$visible && print) { withr::with_output_sink(path, testthat_print(result$value), append = TRUE) } # A sink() will always write in the native encoding, so we read with # base::readLines() then convert to UTF-8 list( val = result$value, vis = result$visible, out = enc2utf8(base::readLines(path, warn = FALSE)) ) } #' @export #' @rdname capture_output testthat_print <- function(x) { UseMethod("testthat_print") } #' @export testthat_print.default <- function(x) { print(x) } testthat/R/reporter-teamcity.R0000644000176200001440000000423515053661631016127 0ustar 
liggesusers#' Report results in Teamcity format #' #' This reporter will output results in the Teamcity message format. #' For more information about Teamcity messages, see #' http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity #' #' @export #' @family reporters TeamcityReporter <- R6::R6Class( "TeamcityReporter", inherit = Reporter, public = list( i = NA_integer_, initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE }, start_context = function(context) { private$report_event("testSuiteStarted", context) }, end_context = function(context) { private$report_event("testSuiteFinished", context) self$cat_line() self$cat_line() }, start_test = function(context, test) { private$report_event("testSuiteStarted", test) self$i <- 1L }, end_test = function(context, test) { private$report_event("testSuiteFinished", test) self$cat_line() }, add_result = function(context, test, result) { testName <- paste0("expectation ", self$i) self$i <- self$i + 1L if (expectation_skip(result)) { private$report_event("testIgnored", testName, message = format(result)) return() } private$report_event("testStarted", testName) if (!expectation_ok(result)) { lines <- strsplit(format(result), "\n")[[1]] private$report_event( "testFailed", testName, message = lines[1], details = paste(lines[-1], collapse = "\n") ) } private$report_event("testFinished", testName) } ), private = list( report_event = function(event, name, ...) { values <- list(name = name, ...) 
values <- map_chr(values, teamcity_escape) if (length(values) == 0) { value_string <- "" } else { value_string <- paste0(names(values), "='", values, "'", collapse = " ") } self$cat_line("##teamcity[", event, " ", value_string, "]") } ) ) # teamcity escape character is | teamcity_escape <- function(s) { s <- gsub("(['|]|\\[|\\])", "|\\1", s) gsub("\n", "|n", s) } testthat/R/testthat-package.R0000644000176200001440000000211715127460763015703 0ustar liggesusers#' An R package to make testing fun! #' #' Try the example below. Have a look at the references and learn more #' from function documentation such as [test_that()]. #' #' @section Options: #' - `testthat.use_colours`: Should the output be coloured? (Default: `TRUE`). #' - `testthat.summary.max_reports`: The maximum number of detailed test #' reports printed for the summary reporter (default: 10). #' - `testthat.summary.omit_dots`: Omit progress dots in the summary reporter #' (default: `FALSE`). #' #' @keywords internal "_PACKAGE" #' @import rlang #' @importFrom brio writeLines readLines #' @useDynLib testthat, .registration = TRUE NULL the <- new.env(parent = emptyenv()) the$description <- character() the$top_level_test <- TRUE the$test_expectations <- 0 the$in_check_reporter <- FALSE # The following block is used by usethis to automatically manage # roxygen namespace tags. Modify with care! ## usethis namespace: start #' @importFrom lifecycle deprecated ## usethis namespace: end NULL # nocov start .onLoad <- function(libname, pkgname) { otel_cache_tracer() } # nocov end testthat/R/expect-that.R0000644000176200001440000000602515072252215014670 0ustar liggesusers#' Declare that an expectation either passes or fails #' #' @description #' These are the primitives that you can use to implement your own expectations. #' Every path through an expectation should either call `pass()`, `fail()`, #' or throw an error (e.g. if the arguments are invalid). Expectations should #' always return `invisible(act$val)`. 
#' #' Learn more about creating your own expectations in #' `vignette("custom-expectation")`. #' #' @param message A character vector describing the failure. The #' first element should describe the expected value, and the second (and #' optionally subsequence) elements should describe what was actually seen. #' @param info Character vector continuing additional information. Included #' for backward compatibility only and new expectations should not use it. #' @param srcref Location of the failure. Should only needed to be explicitly #' supplied when you need to forward a srcref captured elsewhere. #' @param trace_env If `trace` is not specified, this is used to generate an #' informative traceback for failures. You should only need to set this if #' you're calling `fail()` from a helper function; see #' `vignette("custom-expectation")` for details. #' @param trace An optional backtrace created by [rlang::trace_back()]. #' When supplied, the expectation is displayed with the backtrace. #' Expert use only. 
#' @export #' @examples #' expect_length <- function(object, n) { #' act <- quasi_label(rlang::enquo(object), arg = "object") #' #' act_n <- length(act$val) #' if (act_n != n) { #' fail(sprintf("%s has length %i, not length %i.", act$lab, act_n, n)) #' } else { #' pass() #' } #' #' invisible(act$val) #' } fail <- function( message = "Failure has been forced", info = NULL, srcref = NULL, trace_env = caller_env(), trace = NULL ) { check_character(message) check_character(info, allow_null = TRUE) trace <- trace %||% capture_trace(trace_env) message <- paste(c(message, info), collapse = "\n") expectation("failure", message, srcref = srcref, trace = trace) invisible() } snapshot_fail <- function(message, trace_env = caller_env()) { trace <- capture_trace(trace_env) message <- paste(message, collapse = "\n") expectation("failure", message, trace = trace, snapshot = TRUE) invisible() } capture_trace <- function(trace_env) { trace <- trace_back(top = getOption("testthat_topenv"), bottom = trace_env) # Only include trace if there's at least one function apart from the expectation if (!is.null(trace) && trace_length(trace) <= 1) { trace <- NULL } trace } #' @rdname fail #' @export pass <- function() { expectation("success", "success") invisible() } #' Mark a test as successful #' #' This is an older version of [pass()] that exists for backwards compatibility. #' You should now use `pass()` instead. #' #' @export #' @inheritParams fail #' @keywords internal succeed <- function(message = "Success has been forced", info = NULL) { message <- paste(c(message, info), collapse = "\n") expectation("success", message) } testthat/R/test-package.R0000644000176200001440000000354615054053615015022 0ustar liggesusers#' Run all tests in a package #' #' @description #' * `test_local()` tests a local source package. #' * `test_package()` tests an installed package. #' * `test_check()` checks a package during `R CMD check`. 
#' #' See `vignette("special-files")` to learn about the various files that #' testthat works with. #' #' @section `R CMD check`: #' To run testthat automatically from `R CMD check`, make sure you have #' a `tests/testthat.R` that contains: #' #' ``` #' library(testthat) #' library(yourpackage) #' #' test_check("yourpackage") #' ``` #' #' @inherit test_dir return params #' @inheritSection test_dir Environments #' @param ... Additional arguments passed to [test_dir()] #' @export #' @rdname test_package test_package <- function(package, reporter = check_reporter(), ...) { test_path <- system.file("tests", "testthat", package = package) if (test_path == "") { cli::cli_inform("No installed testthat tests found for {.pkg {package}}.") return(invisible()) } test_dir( test_path, package = package, reporter = reporter, ..., load_package = "installed" ) } #' @export #' @rdname test_package test_check <- function(package, reporter = check_reporter(), ...) { require(package, character.only = TRUE) options(cli.hyperlink = FALSE) withr::local_envvar(TESTTHAT_IS_CHECKING = "true") test_dir( "testthat", package = package, reporter = reporter, ..., load_package = "installed" ) } #' @export #' @rdname test_package test_local <- function( path = ".", reporter = NULL, ..., load_package = "source", shuffle = FALSE ) { package <- pkgload::pkg_name(path) test_path <- file.path(pkgload::pkg_path(path), "tests", "testthat") local_assume_not_on_cran() test_dir( test_path, package = package, reporter = reporter, ..., load_package = load_package, shuffle = shuffle ) } testthat/R/examples.R0000644000176200001440000000114515047715224014264 0ustar liggesusers#' Retrieve paths to built-in example test files #' #' `testthat_examples()` retrieves path to directory of test files, #' `testthat_example()` retrieves path to a single test file. 
#' #' @keywords internal #' @param filename Name of test file #' @export #' @examples #' dir(testthat_examples()) #' testthat_example("success") testthat_examples <- function() { system.file("examples", package = "testthat") } #' @export #' @rdname testthat_examples testthat_example <- function(filename) { system.file( "examples", paste0("test-", filename, ".R"), package = "testthat", mustWork = TRUE ) } testthat/R/mock.R0000644000176200001440000000177015054053615013400 0ustar liggesusers#' Mock functions in a package. #' #' @description #' `r lifecycle::badge("defunct")` #' #' `with_mock()` and `local_mock()` are now defunct, and can be replaced by #' [with_mocked_bindings()] and [local_mocked_bindings()]. These functions only #' worked by abusing of R's internals. #' #' @param ... named parameters redefine mocked functions, unnamed parameters #' will be evaluated after mocking the functions #' @param .env the environment in which to patch the functions, #' defaults to the top-level environment. A character is interpreted as #' package name. #' @param .local_envir Environment in which to add exit handler. #' For expert use only. 
#' @keywords internal #' @export with_mock <- function(..., .env = topenv()) { lifecycle::deprecate_stop("3.2.0", "with_mock()", "with_mocked_bindings()") } #' @export #' @rdname with_mock local_mock <- function(..., .env = topenv(), .local_envir = parent.frame()) { lifecycle::deprecate_stop("3.2.0", "local_mock()", "local_mocked_bindings()") } testthat/R/otel.R0000644000176200001440000000356215127460763013422 0ustar liggesusersotel_tracer_name <- "org.r-lib.testthat" # generic otel helpers --------------------------------------------------------- otel_cache_tracer <- NULL otel_local_test_span <- NULL otel_update_span <- NULL local({ otel_is_tracing <- FALSE otel_tracer <- NULL otel_cache_tracer <<- function() { requireNamespace("otel", quietly = TRUE) || return() otel_tracer <<- otel::get_tracer(otel_tracer_name) otel_is_tracing <<- tracer_enabled(otel_tracer) } otel_local_test_span <<- function(name, scope = parent.frame()) { otel_is_tracing || return() otel::start_local_active_span( sprintf("test that %s", name), tracer = otel_tracer, activation_scope = scope ) } otel_update_span <<- function( span, n_success, n_failure, n_error, n_skip, n_warning ) { otel_is_tracing || return() total <- n_success + n_failure + n_error + n_skip + n_warning test_status <- if (n_error > 0) { "error" } else if (n_failure > 0) { "fail" } else if (total == 0 || n_skip == total) { "skip" } else { "pass" } span$set_attribute("test.expectations.total", total) span$set_attribute("test.expectations.passed", n_success) span$set_attribute("test.expectations.failed", n_failure) span$set_attribute("test.expectations.error", n_error) span$set_attribute("test.expectations.skipped", n_skip) span$set_attribute("test.expectations.warning", n_warning) span$set_attribute("test.status", test_status) if (test_status %in% c("pass", "skip")) { span$set_status("ok") } else { span$set_status("error", paste("Test", test_status)) } } }) tracer_enabled <- function(tracer) { .subset2(tracer, "is_enabled")() } 
with_otel_record <- function(expr) { on.exit(otel_cache_tracer()) otelsdk::with_otel_record({ otel_cache_tracer() expr }) } testthat/R/expect-output.R0000644000176200001440000000356615072252215015277 0ustar liggesusers#' Do you expect printed output to match this pattern? #' #' Test for output produced by `print()` or `cat()`. This is best used for #' very simple output; for more complex cases use [expect_snapshot()]. #' #' @export #' @family expectations #' @inheritParams expect_that #' @param regexp Regular expression to test against. #' * A character vector giving a regular expression that must match the output. #' * If `NULL`, the default, asserts that there should output, #' but doesn't check for a specific value. #' * If `NA`, asserts that there should be no output. #' @inheritDotParams expect_match -object -regexp -info -label #' @inheritParams capture_output #' @return The first argument, invisibly. #' @examples #' str(mtcars) #' expect_output(str(mtcars), "32 obs") #' expect_output(str(mtcars), "11 variables") #' #' # You can use the arguments of grepl to control the matching #' expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE) #' expect_output(str(mtcars), "$ mpg", fixed = TRUE) expect_output <- function( object, regexp = NULL, ..., info = NULL, label = NULL, width = 80 ) { check_number_whole(width, min = 1) act <- quasi_capture(enquo(object), label, capture_output, width = width) if (identical(regexp, NA)) { if (!identical(act$cap, "")) { msg <- c( sprintf("Expected %s to produce no output.", act$lab), sprintf("Actual output:\n%s", encodeString(act$cap)) ) fail(msg, info = info) } else { pass() } } else if (is.null(regexp) || identical(act$cap, "")) { if (identical(act$cap, "")) { msg <- sprintf("Expected %s to produce output.", act$lab) fail(msg, info = info) } else { pass() } } else { act_out <- labelled_value(act$cap, paste0("output from ", act$lab)) expect_match_(act_out, enc2native(regexp), ..., title = "output") } invisible(act$val) } 
testthat/R/expect-known.R0000644000176200001440000001527215072252215015070 0ustar liggesusers#' Do you expect the results/output to equal a known value? #' #' For complex printed output and objects, it is often challenging to describe #' exactly what you expect to see. `expect_known_value()` and #' `expect_known_output()` provide a slightly weaker guarantee, simply #' asserting that the values have not changed since the last time that you ran #' them. #' #' These expectations should be used in conjunction with git, as otherwise #' there is no way to revert to previous values. Git is particularly useful #' in conjunction with `expect_known_output()` as the diffs will show you #' exactly what has changed. #' #' Note that known values updates will only be updated when running tests #' interactively. `R CMD check` clones the package source so any changes to #' the reference files will occur in a temporary directory, and will not be #' synchronised back to the source package. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `expect_known_output()` and friends are deprecated in the 3rd edition; #' please use [expect_snapshot_output()] and friends instead. #' #' @export #' @param file File path where known value/output will be stored. #' @param update Should the file be updated? Defaults to `TRUE`, with #' the expectation that you'll notice changes because of the first failure, #' and then see the modified files in git. #' @param version The serialization format version to use. The default, 2, was #' the default format from R 1.4.0 to 3.5.3. Version 3 became the default from #' R 3.6.0 and can only be read by R versions 3.5.0 and higher. #' @param ... Passed on to [waldo::compare()]. 
#' @keywords internal #' @inheritParams expect_equal #' @inheritParams capture_output_lines #' @examples #' tmp <- tempfile() #' #' # The first run always succeeds #' expect_known_output(mtcars[1:10, ], tmp, print = TRUE) #' #' # Subsequent runs will succeed only if the file is unchanged #' # This will succeed: #' expect_known_output(mtcars[1:10, ], tmp, print = TRUE) #' #' \dontrun{ #' # This will fail #' expect_known_output(mtcars[1:9, ], tmp, print = TRUE) #' } expect_known_output <- function( object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80 ) { check_string(file) check_bool(update) check_bool(print) check_number_whole(width, min = 1) edition_deprecate( 3, "expect_known_output()", "Please use `expect_snapshot_output()` instead" ) act <- list() act$quo <- enquo(object) act$lab <- label %||% quo_label(act$quo) act <- append(act, eval_with_output(object, print = print, width = width)) expect_file_unchanged_(file, act$out, update = update, info = info, ...) invisible(act$val) } expect_file_unchanged_ <- function( path, lines, ..., update = TRUE, info = NULL, trace_env = caller_env() ) { if (!file.exists(path)) { cli::cli_warn("Creating reference output.") brio::write_lines(lines, path) pass() return() } old_lines <- brio::read_lines(path) if (update) { brio::write_lines(lines, path) if (!all_utf8(lines)) { cli::cli_warn("New reference output is not UTF-8 encoded.") } } if (!all_utf8(old_lines)) { cli::cli_warn("Reference output is not UTF-8 encoded.") } comp <- waldo_compare( x = old_lines, x_arg = "old", y = lines, y_arg = "new", ... ) if (length(comp) != 0) { msg <- sprintf( "Results have changed from known value recorded in %s.\n\n%s", encodeString(path, quote = "'"), paste0(comp, collapse = "\n\n") ) fail(msg, info = info, trace_env = trace_env) } else { pass() } } #' Do you expect the output/result to equal a known good value? #' #' `expect_output_file()` behaves identically to [expect_known_output()]. 
#' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `expect_output_file()` is deprecated in the 3rd edition; #' please use [expect_snapshot_output()] and friends instead. #' #' @export #' @keywords internal expect_output_file <- function( object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80 ) { check_string(file) check_bool(update) check_bool(print) check_number_whole(width, min = 1) # Code is a copy of expect_known_output() edition_deprecate( 3, "expect_output_file()", "Please use `expect_snapshot_output()` instead" ) act <- list() act$quo <- enquo(object) act$lab <- label %||% quo_label(act$quo) act <- append(act, eval_with_output(object, print = print, width = width)) expect_file_unchanged_(file, act$out, update = update, info = info, ...) invisible(act$val) } #' @export #' @rdname expect_known_output expect_known_value <- function( object, file, update = TRUE, ..., info = NULL, label = NULL, version = 2 ) { check_string(file) check_bool(update) check_number_whole(version, min = 1) edition_deprecate( 3, "expect_known_value()", "Please use `expect_snapshot_value()` instead" ) act <- quasi_label(enquo(object), label) if (!file.exists(file)) { cli::cli_warn("Creating reference value.") saveRDS(object, file, version = version) pass() } else { ref_val <- readRDS(file) comp <- compare(act$val, ref_val, ...) 
if (update && !comp$equal) { saveRDS(act$val, file, version = version) } if (!comp$equal) { msg <- sprintf( "%s has changed from known value recorded in %s.\n%s", act$lab, encodeString(file, quote = "'"), comp$message ) fail(msg, info = info) } else { pass() } } invisible(act$val) } #' @export #' @rdname expect_known_output #' @usage NULL expect_equal_to_reference <- function(..., update = FALSE) { edition_deprecate( 3, "expect_equal_to_reference()", "Please use `expect_snapshot_value()` instead" ) expect_known_value(..., update = update) } #' @export #' @rdname expect_known_output #' @param hash Known hash value. Leave empty and you'll be informed what #' to use in the test output. expect_known_hash <- function(object, hash = NULL) { check_installed("digest") edition_deprecate( 3, "expect_known_hash()", "Please use `expect_snapshot_value()` instead" ) act <- quasi_label(enquo(object)) act_hash <- digest::digest(act$val) if (!is.null(hash)) { act_hash <- substr(act_hash, 1, nchar(hash)) } if (is.null(hash)) { cli::cli_warn("No recorded hash: use {substr(act_hash, 1, 10)}.") pass() } else { if (hash != act_hash) { fail(sprintf( "Expected value to hash to %s.\nActual hash: %s", hash, act_hash )) } else { pass() } } invisible(act$val) } all_utf8 <- function(x) { !anyNA(iconv(x, "UTF-8", "UTF-8")) } testthat/R/reporter-zzz.R0000644000176200001440000000571215054571075015151 0ustar liggesusers#' Get and set active reporter. #' #' `get_reporter()` and `set_reporter()` access and modify the current "active" #' reporter. Generally, these functions should not be called directly; instead #' use `with_reporter()` to temporarily change, then reset, the active reporter. #' #' #' @param reporter Reporter to use to summarise output. Can be supplied #' as a string (e.g. "summary") or as an R6 object #' (e.g. `SummaryReporter$new()`). #' #' See [Reporter] for more details and a list of built-in reporters. #' @param code Code to execute. 
#' @return `with_reporter()` invisible returns the reporter active when `code` #' was evaluated. #' @param start_end_reporter Should the reporters `start_reporter()` and #' `end_reporter()` methods be called? For expert use only. #' @keywords internal #' @name reporter-accessors NULL #' @rdname reporter-accessors #' @export set_reporter <- function(reporter) { old <- the$reporter the$reporter <- reporter invisible(old) } #' @rdname reporter-accessors #' @export get_reporter <- function() { the$reporter } #' @rdname reporter-accessors #' @export with_reporter <- function(reporter, code, start_end_reporter = TRUE) { # Ensure we don't propagate the local description to the new reporter local_description_set() reporter <- find_reporter(reporter) old <- set_reporter(reporter) withr::defer(set_reporter(old)) if (start_end_reporter) { reporter$start_reporter() } tryCatch(code, testthat_abort_reporter = function(cnd) { cat(conditionMessage(cnd), "\n") NULL }) if (start_end_reporter) { reporter$end_reporter() } invisible(reporter) } stop_reporter <- function(message) { cli::cli_abort( message, class = "testthat_abort_reporter", call = NULL ) } #' Find reporter object given name or object. #' #' If not found, will return informative error message. #' Pass a character vector to create a [MultiReporter] composed #' of individual reporters. #' Will return null if given NULL. 
#' #' @param reporter name of reporter(s), or reporter object(s) #' @keywords internal find_reporter <- function(reporter) { if (is.null(reporter)) { return(NULL) } if (inherits(reporter, "R6ClassGenerator")) { reporter$new() } else if (inherits(reporter, "Reporter")) { reporter } else if (is_string(reporter)) { find_reporter_one(reporter) } else if (is.character(reporter)) { reporters <- lapply(reporter, find_reporter_one, call = current_env()) MultiReporter$new(reporters) } else { stop_input_type( reporter, c( "a string", "a character vector", "a reporter object", "a reporter class" ) ) } } find_reporter_one <- function(reporter, ..., call = caller_env()) { name <- reporter substr(name, 1, 1) <- toupper(substr(name, 1, 1)) name <- paste0(name, "Reporter") if (!exists(name)) { cli::cli_abort("Cannot find test reporter {.arg {reporter}}.", call = call) } get(name)$new(...) } testthat/R/expectation.R0000644000176200001440000001476315111027202014763 0ustar liggesusers#' The previous building block of all `expect_` functions #' #' Previously, we recommended using `expect()` when writing your own #' expectations. Now we instead recommend [pass()] and [fail()]. See #' `vignette("custom-expectation")` for details. #' #' @param ok `TRUE` or `FALSE` indicating if the expectation was successful. #' @param failure_message A character vector describing the failure. The #' first element should describe the expected value, and the second (and #' optionally subsequence) elements should describe what was actually seen. #' @inheritParams fail #' @return An expectation object from either `succeed()` or `fail()`. #' with a `muffle_expectation` restart. 
#' @seealso [exp_signal()] #' @keywords internal #' @export expect <- function( ok, failure_message, info = NULL, srcref = NULL, trace = NULL, trace_env = caller_env() ) { check_bool(ok) check_character(failure_message) if (!ok) { fail( failure_message, info, srcref = srcref, trace = trace, trace_env = trace_env ) } else { # For backwards compatibility succeed(failure_message) } } #' Expectation conditions #' #' @description #' `new_expectation()` creates an expectation condition object and #' `exp_signal()` signals it. `expectation()` does both. `is.expectation()` #' tests if a captured condition is a testthat expectation. #' #' These functions are primarily for internal use. If you are creating your #' own expectation, you do not need these functions are instead should use #' [pass()] or [fail()]. See `vignette("custom-expectation")` for more #' details. #' #' @param type Expectation type. Must be one of "success", "failure", "error", #' "skip", "warning". #' @param message Message describing test failure #' @param srcref Optional `srcref` giving location of test. #' @keywords internal #' @inheritParams expect #' @export expectation <- function(type, message, ..., srcref = NULL, trace = NULL) { exp <- new_expectation(type, message, ..., srcref = srcref, trace = trace) exp_signal(exp) } #' @rdname expectation #' @param ... Additional attributes for the expectation object. #' @param .subclass An optional subclass for the expectation object. #' @export new_expectation <- function( type, message, ..., srcref = NULL, trace = NULL, .subclass = NULL ) { type <- match.arg(type, c("success", "failure", "error", "skip", "warning")) structure( list( message = message, srcref = srcref, trace = trace ), class = c( .subclass, paste0("expectation_", type), "expectation", # Make broken expectations catchable by try() if (type %in% c("failure", "error")) "error", "condition" ), ... 
) } #' @rdname expectation #' @param exp An expectation object, as created by #' [new_expectation()]. #' @export exp_signal <- function(exp) { withRestarts( if (expectation_broken(exp)) { stop(exp) } else { signalCondition(exp) }, muffle_expectation = function(e) NULL, # Legacy support for shinytest2 # https://github.com/r-lib/testthat/pull/2271#discussion_r2528722708 continue_test = function(e) NULL ) invisible(exp) } #' @export #' @rdname expectation #' @param x object to test for class membership is.expectation <- function(x) inherits(x, "expectation") #' @export print.expectation <- function(x, ...) { cat( cli::style_bold("<", paste0(class(x), collapse = "/"), ">"), "\n", sep = "" ) cat(format(x), "\n", sep = "") invisible(x) } #' @export format.expectation_success <- function(x, ...) { "As expected" } #' @export format.expectation <- function(x, ...) { # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields if (is.null(x[["trace"]]) || trace_length(x[["trace"]]) == 0L) { return(x$message) } trace_lines <- format(x$trace, ...) 
lines <- c(x$message, cli::style_bold("Backtrace:"), trace_lines) paste(lines, collapse = "\n") } # as.expectation ---------------------------------------------------------- as.expectation <- function(x, srcref = NULL) { UseMethod("as.expectation", x) } #' @export as.expectation.expectation <- function(x, srcref = NULL) { x$srcref <- x$srcref %||% srcref x } #' @export as.expectation.error <- function(x, srcref = NULL) { if (is.null(x$call)) { header <- paste0("Error: ") } else { header <- paste0("Error in `", deparse1(x$call), "`: ") } msg <- paste0( if (!is_simple_error(x)) { paste0("<", paste(class(x), collapse = "/"), ">\n") }, header, cnd_message(x) ) new_expectation("error", msg, srcref = srcref, trace = x[["trace"]]) } is_simple_error <- function(x) { class(x)[[1]] %in% c("simpleError", "rlang_error") } #' @export as.expectation.warning <- function(x, srcref = NULL) { new_expectation( "warning", cnd_message(x), srcref = srcref, trace = x[["trace"]] ) } #' @export as.expectation.skip <- function(x, ..., srcref = NULL) { new_expectation("skip", cnd_message(x), srcref = srcref, trace = x[["trace"]]) } #' @export as.expectation.default <- function(x, srcref = NULL) { cli::cli_abort( "Don't know how to convert {.cls {class(x)}} to expectation.", call = NULL ) } # expectation_type -------------------------------------------------------- expectation_type <- function(exp) { stopifnot(is.expectation(exp)) gsub("^expectation_", "", class(exp)[[1]]) } expectation_success <- function(exp) expectation_type(exp) == "success" expectation_failure <- function(exp) expectation_type(exp) == "failure" expectation_error <- function(exp) expectation_type(exp) == "error" expectation_skip <- function(exp) expectation_type(exp) == "skip" expectation_warning <- function(exp) expectation_type(exp) == "warning" expectation_broken <- function(exp) { expectation_failure(exp) || expectation_error(exp) } expectation_ok <- function(exp) { expectation_type(exp) %in% c("success", "warning") } 
# One-character status code for an expectation, as used by compact reporters.
single_letter_summary <- function(x) {
  switch(
    expectation_type(x),
    skip = colourise("S", "skip"),
    success = colourise(".", "success"),
    error = colourise("E", "error"),
    failure = colourise("F", "failure"),
    warning = colourise("W", "warning"),
    "?"
  )
}

# Format the source location of an expectation as cli {.file} markup
# ("file:line:col"), with optional prefix/suffix. Returns "" when the
# expectation carries no usable srcref.
expectation_location <- function(x, prefix = "", suffix = "") {
  srcref <- x$srcref
  if (!inherits(srcref, "srcref")) {
    return("")
  }

  filename <- attr(srcref, "srcfile")$filename
  # FIX: `filename` was computed but never used -- the format string
  # hard-coded a "(unknown)" placeholder. Interpolate the real file name so
  # reporters can show (and hyperlink) the failing location.
  cli::format_inline(
    "{prefix}{.file {filename}:{srcref[1]}:{srcref[2]}}{suffix}"
  )
}
testthat/R/reporter-debug.R0000644000176200001440000000471315125277274015401 0ustar liggesusers#' Interactively debug failing tests
#'
#' This reporter will call a modified version of [recover()] on all
#' broken expectations.
#'
#' @export
#' @family reporters
DebugReporter <- R6::R6Class(
  "DebugReporter",
  inherit = Reporter,
  public = list(
    add_result = function(context, test, result) {
      if (!expectation_success(result) && !is.null(result$start_frame)) {
        # If output is currently being sunk, redirect the debugger's output
        # to the reporter's own stream; undo the sink when we leave.
        if (sink_number() > 0) {
          sink(self$out)
          withr::defer(sink())
        }
        recover2(
          start_frame = result$start_frame,
          end_frame = result$end_frame
        )
      }
    }
  )
)

# Number of active output sinks (0 when output goes straight to the console).
sink_number <- function() {
  sink.number(type = "output")
}

# recover2 ----------------------------------------------------------------

# Modeled after utils::recover(), which is
# part of the R package, https://www.R-project.org
#
# Copyright (C) 1995-2016 The R Core Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# https://www.R-project.org/Licenses/

# Interactive frame browser used by DebugReporter: lists the call stack
# between `start_frame` and `end_frame` and lets the user pick a frame to
# browse, repeating until they choose 0.
recover2 <- function(start_frame = 1L, end_frame = sys.nframe()) {
  calls <- sys.calls()

  # Temporarily turn off S4 tracing while the user is browsing; restore on
  # exit via withr::defer().
  if (.isMethodsDispatchOn()) {
    tState <- tracingState(FALSE)
    withr::defer(tracingState(tState))
  }

  # Trim the stack to the frames of interest, optionally stripping srcrefs
  # from the call labels (controlled by the testthat_format_srcrefs option).
  from <- min(end_frame, length(calls))
  calls <- calls[start_frame:from]
  if (is_false(peek_option("testthat_format_srcrefs"))) {
    calls <- lapply(calls, zap_srcref)
  }
  calls <- utils::limitedLabels(calls)

  repeat {
    which <- show_menu(calls, "\nEnter a frame number, or 0 to exit ")
    if (which) {
      # Translate the 1-based menu choice back into an absolute frame number.
      # NOTE(review): the `- 2` and `skip = 7 - which` offsets compensate for
      # the frames recover2() itself adds to the stack -- confirm these if the
      # call chain into recover2() ever changes.
      frame <- sys.frame(start_frame - 2 + which)
      browse_frame(frame, skip = 7 - which)
    } else {
      break
    }
  }
}

# Helpers -----------------------------------------------------------------

# Drop the srcref attribute from a call so its label omits source info.
zap_srcref <- function(x) {
  attr(x, "srcref") <- NULL
  x
}

# Thin wrapper over utils::menu().
show_menu <- function(choices, title = NULL) {
  utils::menu(choices, title = title)
}

# Enter browser() inside `frame`, skipping `skip` intervening calls so the
# user lands in the frame they selected rather than in this helper.
browse_frame <- function(frame, skip) {
  eval(
    substitute(browser(skipCalls = skip), list(skip = skip)),
    envir = frame
  )
}
testthat/R/expect-constant.R0000644000176200001440000000445715127554030015571 0ustar liggesusers#' Do you expect `TRUE` or `FALSE`?
#'
#' @description
#' These are fall-back expectations that you can use when none of the other
#' more specific expectations apply. The disadvantage is that you may get
#' a less informative error message.
#'
#' Attributes are ignored.
#' #' @inheritParams expect_that #' @family expectations #' @examples #' expect_true(2 == 2) #' # Failed expectations will throw an error #' show_failure(expect_true(2 != 2)) #' #' # where possible, use more specific expectations, to get more informative #' # error messages #' a <- 1:4 #' show_failure(expect_true(length(a) == 3)) #' show_failure(expect_equal(length(a), 3)) #' #' x <- c(TRUE, TRUE, FALSE, TRUE) #' show_failure(expect_true(all(x))) #' show_failure(expect_all_true(x)) #' @name logical-expectations NULL #' @export #' @rdname logical-expectations expect_true <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label) exp <- labelled_value(TRUE, "TRUE") expect_waldo_constant_(act, exp, info = info, ignore_attr = TRUE) invisible(act$val) } #' @export #' @rdname logical-expectations expect_false <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label) exp <- labelled_value(FALSE, "FALSE") expect_waldo_constant_(act, exp, info = info, ignore_attr = TRUE) invisible(act$val) } #' Do you expect `NULL`? #' #' This is a special case because `NULL` is a singleton so it's possible #' check for it either with `expect_equal(x, NULL)` or `expect_type(x, "NULL")`. 
#' #' @inheritParams expect_that #' @export #' @family expectations #' @examples #' x <- NULL #' y <- 10 #' #' expect_null(x) #' show_failure(expect_null(y)) expect_null <- function(object, info = NULL, label = NULL) { act <- quasi_label(enquo(object), label) exp <- labelled_value(NULL, "NULL") expect_waldo_constant_(act, exp, info = info) invisible(act$val) } expect_waldo_constant_ <- function( act, exp, ..., info = NULL, trace_env = caller_env() ) { comp <- waldo_compare( act$val, exp$val, ..., x_arg = "actual", y_arg = "expected" ) if (length(comp) != 0) { msg <- c( sprintf("Expected %s to be %s.", act$lab, exp$lab), "Differences:", paste0(comp, "\n") ) fail(msg, info = info, trace_env = trace_env) } else { pass() } } testthat/R/expect-shape.R0000644000176200001440000000601615072252215015030 0ustar liggesusers#' Do you expect an object with this length or shape? #' #' `expect_length()` inspects the [length()] of an object; `expect_shape()` #' inspects the "shape" (i.e. [nrow()], [ncol()], or [dim()]) of #' higher-dimensional objects like data.frames, matrices, and arrays. #' #' @seealso [expect_vector()] to make assertions about the "size" of a vector. #' @inheritParams expect_that #' @param n Expected length. #' @family expectations #' @export #' @examples #' expect_length(1, 1) #' expect_length(1:10, 10) #' show_failure(expect_length(1:10, 1)) #' #' x <- matrix(1:9, nrow = 3) #' expect_shape(x, nrow = 3) #' show_failure(expect_shape(x, nrow = 4)) #' expect_shape(x, ncol = 3) #' show_failure(expect_shape(x, ncol = 4)) #' expect_shape(x, dim = c(3, 3)) #' show_failure(expect_shape(x, dim = c(3, 4, 5))) expect_length <- function(object, n) { check_number_whole(n, min = 0) act <- quasi_label(enquo(object)) act$n <- length(act$val) if (act$n != n) { fail(c( sprintf("Expected %s to have length %i.", act$lab, n), sprintf("Actual length: %i.", act$n) )) } else { pass() } invisible(act$val) } #' @param nrow,ncol Expected [nrow()]/[ncol()] of `object`. 
#' @param dim Expected [dim()] of `object`.
#' @rdname expect_length
#' @param ... Not used; used to force naming of other arguments.
#' @export
expect_shape <- function(object, ..., nrow, ncol, dim) {
  # FIX: use `<-` for the top-level function definition (was `=`), matching
  # every other definition in the package and tidyverse style.
  check_dots_empty()
  # Exactly one of nrow/ncol/dim must be supplied.
  check_exclusive(nrow, ncol, dim)

  act <- quasi_label(enquo(object))
  dim_object <- base::dim(object)

  if (is.null(dim_object)) {
    fail(sprintf("Expected %s to have dimensions.", act$lab))
  } else if (!missing(nrow)) {
    check_number_whole(nrow, allow_na = TRUE)
    act$nrow <- dim_object[1L]
    # Compare as integers so 3 and 3L are treated as equal.
    if (!identical(as.integer(act$nrow), as.integer(nrow))) {
      fail(c(
        sprintf("Expected %s to have %i rows.", act$lab, nrow),
        sprintf("Actual rows: %i.", act$nrow)
      ))
    } else {
      pass()
    }
  } else if (!missing(ncol)) {
    check_number_whole(ncol, allow_na = TRUE)
    # ncol is meaningless for 1-d arrays.
    if (length(dim_object) == 1L) {
      fail(sprintf("Expected %s to have two or more dimensions.", act$lab))
    } else {
      act$ncol <- dim_object[2L]
      if (!identical(as.integer(act$ncol), as.integer(ncol))) {
        fail(c(
          sprintf("Expected %s to have %i columns.", act$lab, ncol),
          sprintf("Actual columns: %i.", act$ncol)
        ))
      } else {
        pass()
      }
    }
  } else {
    # !missing(dim)
    # FIX: `is.integer(x)` implies `is.numeric(x)`, so the previous
    # `!is.numeric(dim) && !is.integer(dim)` is equivalent to this simpler
    # test.
    if (!is.numeric(dim)) {
      stop_input_type(dim, "a numeric vector")
    }
    act$dim <- dim_object
    if (length(act$dim) != length(dim)) {
      fail(c(
        sprintf("Expected %s to have %i dimensions.", act$lab, length(dim)),
        sprintf("Actual dimensions: %i.", length(act$dim))
      ))
    } else if (!identical(as.integer(act$dim), as.integer(dim))) {
      fail(c(
        sprintf("Expected %s to have dim (%s).", act$lab, toString(dim)),
        sprintf("Actual dim: (%s).", toString(act$dim))
      ))
    } else {
      pass()
    }
  }

  invisible(act$val)
}
testthat/R/utils.R0000644000176200001440000000443115054053615013604 0ustar liggesusers#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`

can_entrace <- function(cnd) {
  !inherits(cnd, "Throwable")
}

# Need to strip environment and source references to make lightweight
# function suitable to send to another process
transport_fun <- function(f) {
  environment(f) <- .GlobalEnv
  f <-
zap_srcref(f) f } # Handled specially in test_code so no backtrace testthat_warn <- function(message, ...) { warn(message, class = "testthat_warn", ...) } split_by_line <- function(x) { trailing_nl <- grepl("\n$", x) x <- strsplit(x, "\n") x[trailing_nl] <- lapply(x[trailing_nl], c, "") x } rstudio_tickle <- function() { if (!is_installed("rstudioapi")) { return() } if (!rstudioapi::hasFun("executeCommand")) { return() } rstudioapi::executeCommand("vcsRefresh") if (!is_positron()) { rstudioapi::executeCommand("refreshFiles") } } is_positron <- function() { nzchar(Sys.getenv("POSITRON", "")) } first_upper <- function(x) { substr(x, 1, 1) <- toupper(substr(x, 1, 1)) x } in_check_reporter <- function() { isTRUE(the$in_check_reporter) } r_version <- function() paste0("R", getRversion()[, 1:2]) # Supress cli wrapping no_wrap <- function(x) { x <- gsub(" ", "\u00a0", x, fixed = TRUE) x <- gsub("\n", "\f", x, fixed = TRUE) x } paste_c <- function(...) { paste0(c(...), collapse = "") } # from rematch2 re_match <- function(text, pattern, perl = TRUE, ...) { stopifnot(is.character(pattern), length(pattern) == 1, !is.na(pattern)) text <- as.character(text) match <- regexpr(pattern, text, perl = perl, ...) 
start <- as.vector(match) length <- attr(match, "match.length") end <- start + length - 1L matchstr <- substring(text, start, end) matchstr[start == -1] <- NA_character_ res <- data.frame(stringsAsFactors = FALSE, .text = text, .match = matchstr) if (!is.null(attr(match, "capture.start"))) { gstart <- attr(match, "capture.start") glength <- attr(match, "capture.length") gend <- gstart + glength - 1L groupstr <- substring(text, gstart, gend) groupstr[gstart == -1] <- NA_character_ dim(groupstr) <- dim(gstart) res <- cbind(groupstr, res, stringsAsFactors = FALSE) } names(res) <- c(attr(match, "capture.names"), ".text", ".match") class(res) <- c("tbl_df", "tbl", class(res)) res } testthat/R/expect-comparison.R0000644000176200001440000001120715127554030016101 0ustar liggesusers#' Do you expect a value bigger or smaller than this? #' #' These functions compare values of comparable data types, such as numbers, #' dates, and times. #' #' @inheritParams expect_equal #' @param object,expected A value to compare and its expected bound. 
#' @family expectations #' @examples #' a <- 9 #' expect_lt(a, 10) #' #' \dontrun{ #' expect_lt(11, 10) #' } #' #' a <- 11 #' expect_gt(a, 10) #' \dontrun{ #' expect_gt(9, 10) #' } #' @name comparison-expectations NULL expect_compare_ <- function( operator = c("<", "<=", ">", ">="), act, exp, trace_env = caller_env() ) { operator <- match.arg(operator) op <- match.fun(operator) cmp <- op(act$val, exp$val) if (length(cmp) != 1 || !is.logical(cmp)) { cli::cli_abort( "Result of comparison must be `TRUE`, `FALSE`, or `NA`", call = trace_env ) } else if (!isTRUE(cmp)) { msg <- failure_compare(act, exp, operator) fail(msg, trace_env = trace_env) } else { pass() } } failure_compare <- function(act, exp, operator) { actual_op <- switch(operator, "<" = ">=", "<=" = ">", ">" = "<=", ">=" = "<") msg_exp <- sprintf("Expected %s %s %s.", act$lab, operator, exp$lab) if (is.numeric(act$val)) { digits <- max( digits(act$val), digits(exp$val), min_digits(act$val, exp$val) ) msg_act <- sprintf( "Actual comparison: %s %s %s", num_exact(act$val, digits), actual_op, num_exact(exp$val, digits) ) diff <- act$val - exp$val if (is.na(diff)) { msg_diff <- NULL } else { msg_diff <- sprintf( "Difference: %s %s 0", num_exact(diff, digits), actual_op ) } } else { msg_act <- sprintf( "Actual comparison: \"%s\" %s \"%s\"", act$val, actual_op, exp$val ) if (inherits(act$val, c("Date", "POSIXt"))) { diff <- act$val - exp$val if (is.na(diff)) { msg_diff <- NULL } else { msg_diff <- sprintf( "Difference: %s %s 0 %s", dt_diff(diff), actual_op, attr(diff, "unit") ) } } else { msg_diff <- NULL } } c(msg_exp, msg_act, msg_diff) } #' @export #' @rdname comparison-expectations expect_lt <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) expect_compare_("<", act, exp) invisible(act$val) } #' @export #' @rdname comparison-expectations expect_lte <- function(object, expected, label = NULL, 
expected.label = NULL) { act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) expect_compare_("<=", act, exp) invisible(act$val) } #' @export #' @rdname comparison-expectations expect_gt <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) expect_compare_(">", act, exp) invisible(act$val) } #' @export #' @rdname comparison-expectations expect_gte <- function(object, expected, label = NULL, expected.label = NULL) { act <- quasi_label(enquo(object), label) exp <- quasi_label(enquo(expected), expected.label) expect_compare_(">=", act, exp) invisible(act$val) } # Wordy names ------------------------------------------------------------- #' Deprecated numeric comparison functions #' #' These functions have been deprecated in favour of the more concise #' [expect_gt()] and [expect_lt()]. #' #' @export #' @param ... All arguments passed on to `expect_lt()`/`expect_gt()`. #' @keywords internal expect_less_than <- function(...) { cli::cli_warn("Deprecated: please use {.fn expect_lt} instead.") expect_lt(...) } #' @rdname expect_less_than #' @export expect_more_than <- function(...) { cli::cli_warn("Deprecated: please use {.fn expect_gt} instead.") expect_gt(...) 
} # Helpers ----------------------------------------------------------------- num_exact <- function(x, digits = 6) { sprintf(paste0("%0.", digits, "f"), x) } min_digits <- function(x, y, tolerance = testthat_tolerance()) { if (is.integer(x) && is.integer(y)) { return(0L) } attributes(x) <- NULL attributes(y) <- NULL n <- digits(abs(x - y)) if (!is.null(tolerance)) { n <- min(n, digits(tolerance)) } as.integer(n) + 1L } digits <- function(x) { x <- x[!is.na(x) & x != 0] if (length(x) == 0) { return(0) } scale <- -log10(min(abs(x))) if (scale <= 0) { 0L } else { ceiling(round(scale, digits = 2)) } } dt_diff <- function(x) { val <- unclass(x) digits <- digits(abs(val)) + 1 paste(num_exact(val, digits), attr(x, "unit")) } testthat/R/snapshot-value.R0000644000176200001440000000707015047715224015422 0ustar liggesusers#' Do you expect this code to return the same value as last time? #' #' Captures the result of function, flexibly serializing it into a text #' representation that's stored in a snapshot file. See [expect_snapshot()] #' for more details on snapshot testing. #' #' @param style Serialization style to use: #' * `json` uses [jsonlite::fromJSON()] and [jsonlite::toJSON()]. This #' produces the simplest output but only works for relatively simple #' objects. #' * `json2` uses [jsonlite::serializeJSON()] and [jsonlite::unserializeJSON()] #' which are more verbose but work for a wider range of type. #' * `deparse` uses [deparse()], which generates a depiction of the object #' using R code. #' * `serialize()` produces a binary serialization of the object using #' [serialize()]. This is all but guaranteed to work for any R object, #' but produces a completely opaque serialization. #' @param ... Passed on to [waldo::compare()] so you can control the details of #' the comparison. 
#' @inheritParams expect_snapshot #' @inheritParams compare #' @export expect_snapshot_value <- function( x, style = c("json", "json2", "deparse", "serialize"), cran = FALSE, tolerance = testthat_tolerance(), ..., variant = NULL ) { edition_require(3, "expect_snapshot_value()") style <- arg_match(style) check_bool(cran) check_number_decimal(tolerance, min = 0) variant <- check_variant(variant) lab <- quo_label(enquo(x)) save <- switch( style, json = function(x) jsonlite::toJSON(x, auto_unbox = TRUE, pretty = TRUE), json2 = function(x) jsonlite::serializeJSON(x, pretty = TRUE), deparse = function(x) paste0(deparse(x), collapse = "\n"), serialize = function(x) { jsonlite::base64_enc(serialize(x, NULL, version = 2)) } ) load <- switch( style, json = function(x) jsonlite::fromJSON(x, simplifyVector = FALSE), json2 = function(x) jsonlite::unserializeJSON(x), deparse = function(x) reparse(x), serialize = function(x) unserialize(jsonlite::base64_dec(x)) ) with_is_snapshotting(force(x)) check_roundtrip( x, load(save(x)), label = lab, style = style, ..., tolerance = tolerance ) expect_snapshot_helper( lab, x, save = save, load = load, cran = cran, ..., tolerance = tolerance, variant = variant, trace_env = caller_env() ) } # Safe environment for evaluating deparsed objects, based on inspection of # https://github.com/wch/r-source/blob/5234fe7b40aad8d3929d240c83203fa97d8c79fc/src/main/deparse.c#L845 reparse <- function(x) { env <- env(emptyenv()) env_bind( env, !!!env_get_list( base_env(), c( c("c", "structure", ":", "-"), c("list", "numeric", "integer", "logical", "character"), "function", c("quote", "alist", "pairlist", "as.pairlist", "expression") ) ) ) env_bind(env, !!!env_get_list(ns_env("methods"), c("new", "getClass"))) eval(parse(text = x), env) } check_roundtrip <- function( x, y, label, style, ..., tolerance = testthat_tolerance(), error_call = caller_env() ) { check <- waldo_compare( x, y, x_arg = "original", y_arg = "new", ..., tolerance = tolerance ) if 
(length(check) > 0) { cli::cli_abort( c( "{.code {label}} could not be safely serialized with {.arg style} = {.str {style}}.", " " = "Serializing then deserializing the object returned something new:\n\n{no_wrap(check)}\n", i = "You may need to try a different {.arg style}." ), call = error_call ) } } testthat/R/reporter-list.R0000644000176200001440000001443415072252215015260 0ustar liggesusersmethods::setOldClass("proc_time") #' Capture test results and metadata #' #' This reporter gathers all results, adding additional information such as #' test elapsed time, and test filename if available. Very useful for reporting. #' #' @export #' @family reporters ListReporter <- R6::R6Class( "ListReporter", inherit = Reporter, public = list( running = NULL, current_file = "", # so we can still subset with this results = NULL, initialize = function() { super$initialize() self$capabilities$parallel_support <- TRUE self$capabilities$parallel_updates <- TRUE self$results <- Stack$new() self$running <- new.env(parent = emptyenv()) }, start_test = function(context, test) { # is this a new test block? 
if ( !identical(self$running[[self$current_file]]$context, context) || !identical(self$running[[self$current_file]]$test, test) ) { self$running[[self$current_file]]$context <- context self$running[[self$current_file]]$test <- test self$running[[self$current_file]]$expectations <- Stack$new() self$running[[self$current_file]]$start_time <- proc.time() } }, add_result = function(context, test, result) { if (is.null(self$running[[self$current_file]]$expectations)) { # we received a result outside of a test: # could be a bare expectation or an exception/error if (!inherits(result, 'error')) { return() } self$running[[self$current_file]]$expectations <- Stack$new() } self$running[[self$current_file]]$expectations$push(result) }, end_test = function(context, test) { elapsed <- as.double( proc.time() - self$running[[self$current_file]]$start_time ) results <- list() if (!is.null(self$running[[self$current_file]]$expectations)) { results <- self$running[[self$current_file]]$expectations$as_list() } self$results$push(list( file = self$current_file %||% NA_character_, context = context, test = test, user = elapsed[1], system = elapsed[2], real = elapsed[3], results = results )) self$running[[self$current_file]]$expectations <- NULL }, start_file = function(name) { if (!name %in% names(self$running)) { newfile <- list( start_time = NA, expectations = NULL, context = NULL, test = NULL ) assign(name, newfile, envir = self$running) } self$current_file <- name }, end_file = function() { # fallback in case we have errors but no expectations self$end_context(self$current_file) rm(list = self$current_file, envir = self$running) }, end_context = function(context) { results <- self$running[[self$current_file]]$expectations if (is.null(results)) { return() } self$running[[self$current_file]]$expectations <- NULL # look for exceptions raised outside of tests # they happened just before end_context since they interrupt the test_ # file execution results <- results$as_list() if 
(length(results) == 0) { return() } self$results$push(list( file = self$current_file %||% NA_character_, context = context, test = NA_character_, user = NA_real_, system = NA_real_, real = NA_real_, results = results )) }, get_results = function() { testthat_results(self$results$as_list()) } ) ) #' Create a `testthat_results` object from the test results #' as stored in the ListReporter results field. #' #' @param results a list as stored in ListReporter #' @return its list argument as a `testthat_results` object #' @seealso ListReporter #' @keywords internal testthat_results <- function(results) { stopifnot(is.list(results)) structure(results, class = "testthat_results") } # return if all tests are successful w/o error all_passed <- function(res) { if (length(res) == 0) { return(TRUE) } df <- as.data.frame.testthat_results(res) sum(df$failed) == 0 && !any(df$error) } any_warnings <- function(res) { if (length(res) == 0) { return(FALSE) } df <- as.data.frame.testthat_results(res) any(df$warning > 0) } #' @export as.data.frame.testthat_results <- function(x, ...) { if (length(x) == 0) { return( data.frame( file = character(0), context = character(0), test = character(0), nb = integer(0), failed = integer(0), skipped = logical(0), error = logical(0), warning = integer(0), user = numeric(0), system = numeric(0), real = numeric(0), passed = integer(0), result = list(), stringsAsFactors = FALSE ) ) } rows <- lapply(x, summarize_one_test_results) do.call(rbind, rows) } summarize_one_test_results <- function(test) { test_results <- test$results nb_tests <- length(test_results) nb_failed <- nb_skipped <- nb_warning <- nb_passed <- 0L error <- FALSE if (nb_tests > 0) { # error reports should be handled differently. 
# They may not correspond to an expect_that() test so remove them last_test <- test_results[[nb_tests]] error <- expectation_error(last_test) if (error) { test_results <- test_results[-nb_tests] nb_tests <- length(test_results) } nb_passed <- sum(map_lgl(test_results, expectation_success)) nb_skipped <- sum(map_lgl(test_results, expectation_skip)) nb_failed <- sum(map_lgl(test_results, expectation_failure)) nb_warning <- sum(map_lgl(test_results, expectation_warning)) } context <- if (length(test$context) > 0) test$context else "" res <- data.frame( file = test$file, context = context, test = test$test, nb = nb_tests, failed = nb_failed, skipped = as.logical(nb_skipped), error = error, warning = nb_warning, user = test$user, system = test$system, real = test$real, stringsAsFactors = FALSE ) # Added at end for backward compatibility res$passed <- nb_passed # Cannot easily add list columns in data.frame() res$result <- list(test_results) res } #' @export print.testthat_results <- function(x, ...) { print(as.data.frame(x)) } testthat/R/reporter-stop.R0000644000176200001440000000372415072252215015272 0ustar liggesusers#' Error if any test fails #' #' @description #' The default reporter used when [expect_that()] is run interactively. #' It responds by displaying a summary of the number of successes and failures #' and [stop()]ping on if there are any failures. 
#' #' @export #' @family reporters StopReporter <- R6::R6Class( "StopReporter", inherit = Reporter, public = list( # All expectations that need to be reported (error, failure, warning, skip) issues = NULL, # Expectations that should cause the test to fail (error, failure) n_fail = 0L, # Successful expectations n_success = 0L, praise = TRUE, depth = 0, initialize = function(praise = TRUE) { super$initialize() self$issues <- Stack$new() self$praise <- praise }, start_test = function(context, test) { if (self$depth == 0) { self$n_fail <- 0L self$n_success <- 0L self$issues <- Stack$new() } self$depth <- self$depth + 1 }, add_result = function(context, test, result) { if (expectation_success(result)) { self$n_success <- self$n_success + 1 return() } if (expectation_broken(result)) { self$n_fail <- self$n_fail + 1 } self$issues$push(result) }, end_test = function(context, test) { self$depth <- self$depth - 1 if (self$depth > 0) { return() } self$local_user_output() for (issue in self$issues$as_list()) { self$cat_line(issue_summary(issue, rule = TRUE, location = FALSE)) } if (self$praise && self$n_fail == 0 && self$n_success > 0) { emoji <- praise_emoji() self$cat_line(cli::format_inline( "{.strong Test passed with {self$n_success} success{?es}{emoji}}." 
)) } if (self$n_fail > 0) { cli::cli_abort( "Test failed with {self$n_fail} failure{?s} and {self$n_success} success{?es}.", call = NULL ) } } ) ) testthat/R/expect-self-test.R0000644000176200001440000001216515127554030015641 0ustar liggesuserscapture_success_failure <- function(expr) { cnd <- NULL n_success <- 0 n_failure <- 0 last_failure <- NULL withCallingHandlers( expr, expectation_failure = function(cnd) { last_failure <<- cnd n_failure <<- n_failure + 1 invokeRestart("muffle_expectation") }, expectation_success = function(cnd) { n_success <<- n_success + 1 invokeRestart("muffle_expectation") } ) list( n_success = n_success, n_failure = n_failure, last_failure = last_failure ) } format_success_failure <- function(status, exp_n_success, exp_n_failure) { pluralise <- function(n, singular, plural) { paste(n, ngettext(n, singular, plural)) } tick <- cli::col_green(cli::symbol$tick) cross <- cli::col_red(cli::symbol$cross) success_ok <- status$n_success == exp_n_success failure_ok <- status$n_failure == exp_n_failure c( sprintf( "Expected %s and %s.", pluralise(exp_n_success, "success", "successes"), pluralise(exp_n_failure, "failure", "failures") ), sprintf( "%s Observed %s.", if (success_ok) tick else cross, pluralise(status$n_success, "success", "successes") ), sprintf( "%s Observed %s.", if (failure_ok) tick else cross, pluralise(status$n_failure, "failure", "failures") ) ) } #' Test your custom expectations #' #' @description #' `expect_success()` checks that there's exactly one success and no failures; #' `expect_failure()` checks that there's exactly one failure and no successes. #' `expect_snapshot_failure()` records the failure message so that you can #' manually check that it is informative. #' #' Use `show_failure()` in examples to print the failure message without #' throwing an error. #' #' @param expr Code to evaluate #' @param message Check that the failure message matches this regexp. #' @param ... Other arguments passed on to [expect_match()]. 
#' @export expect_success <- function(expr) { status <- capture_success_failure(expr) if (status$n_success == 1 && status$n_failure == 0) { pass() return(invisible()) } fail(format_success_failure(status, exp_n_success = 1, exp_n_failure = 0)) invisible() } #' @export #' @rdname expect_success expect_failure <- function(expr, message = NULL, ...) { status <- capture_success_failure(expr) if (status$n_failure == 1 && status$n_success == 0) { if (is.null(message)) { pass() } else { act <- labelled_value(status$last_failure$message, "failure message") expect_match_(act, message, ..., title = "message") } return(invisible()) } fail(format_success_failure(status, exp_n_success = 0, exp_n_failure = 1)) invisible() } #' @export #' @rdname expect_success expect_snapshot_failure <- function(expr) { expr <- enquo0(expr) expect_snapshot_(expr, error = TRUE, error_class = "expectation_failure") } #' Test for absence of success or failure #' #' @description #' `r lifecycle::badge("deprecated")` #' #' These functions are deprecated because [expect_success()] and #' [expect_failure()] now test for exactly one success or no failures, and #' exactly one failure and no successes. 
#' #' @keywords internal #' @export expect_no_success <- function(expr) { lifecycle::deprecate_warn("3.3.0", "expect_no_success()", "expect_failure()") status <- capture_success_failure(expr) if (status$n_success > 0) { fail("Expectation succeeded") } else { pass() } invisible() } #' @export #' @rdname expect_no_success expect_no_failure <- function(expr) { lifecycle::deprecate_warn("3.3.0", "expect_no_failure()", "expect_success()") status <- capture_success_failure(expr) if (status$n_failure > 0) { fail("Expectation failed") } else { pass() } invisible() } expect_snapshot_skip <- function(x, cran = FALSE) { expect_snapshot_condition_("skip", x) } expect_skip <- function(code) { expect_condition_matching_("skip", code) } expect_no_skip <- function(code) { expect_no_("skip", code) } #' @export #' @rdname expect_success show_failure <- function(expr) { exp <- capture_expectation(expr) if (!is.null(exp) && expectation_failure(exp)) { cat(cli::style_bold("Failed expectation:\n")) cat(exp$message, "\n", sep = "") } invisible() } expect_snapshot_reporter <- function( reporter, paths = test_path("reporters/tests.R") ) { local_options(rlang_trace_format_srcrefs = FALSE) withr::local_seed(1014) expect_snapshot_output( with_reporter(reporter, { for (path in paths) { test_one_file(path) } }) ) } # Use specifically for testthat tests in order to override the # defaults found when starting the reporter local_output_override <- function( width = 80, crayon = TRUE, unicode = TRUE, .env = parent.frame() ) { reporter <- get_reporter() if (is.null(reporter)) { return() } old_width <- reporter$width old_crayon <- reporter$crayon old_unicode <- reporter$unicode reporter$width <- width reporter$crayon <- crayon reporter$unicode <- unicode withr::defer( { reporter$width <- old_width reporter$crayon <- old_crayon reporter$unicode <- old_unicode }, .env ) } testthat/R/reporter.R0000644000176200001440000001310515127561732014311 0ustar liggesusers#' Manage test reporting #' #' The job of a 
reporter is to aggregate the results from files, tests, and #' expectations and display them in an informative way. Every testthat function #' that runs multiple tests provides a `reporter` argument which you can #' use to override the default (which is selected by [default_reporter()]). #' #' You only need to use this `Reporter` object directly if you are creating #' a new reporter. Currently, creating new Reporters is undocumented, #' so if you want to create your own, you'll need to make sure that you're #' familiar with [R6](https://adv-r.hadley.nz/r6.html) and then need read the #' source code for a few. #' #' @keywords internal #' @export #' @export Reporter #' @aliases Reporter #' @importFrom R6 R6Class #' @family reporters #' @examples #' path <- testthat_example("success") #' #' test_file(path) #' # Override the default by supplying the name of a reporter #' test_file(path, reporter = "minimal") Reporter <- R6::R6Class( "Reporter", public = list( capabilities = list(parallel_support = FALSE, parallel_updates = FALSE), start_reporter = function() {}, start_context = function(context) {}, start_test = function(context, test) {}, start_file = function(filename) {}, add_result = function(context, test, result) {}, end_test = function(context, test) {}, end_context = function(context) {}, end_reporter = function() {}, end_file = function() {}, is_full = function() FALSE, update = function() {}, width = 80, unicode = TRUE, crayon = TRUE, rstudio = TRUE, hyperlinks = TRUE, out = NULL, initialize = function(file = getOption("testthat.output_file", stdout())) { if (is.character(file)) { file <- normalizePath(file, mustWork = FALSE) } self$out <- file if (is.character(self$out) && file.exists(self$out)) { # If writing to a file, overwrite it if it exists file.remove(self$out) } # Capture at init so not affected by test settings self$width <- cli::console_width() self$unicode <- cli::is_utf8_output() self$crayon <- cli::num_ansi_colors() > 1 self$rstudio <- 
Sys.getenv("RSTUDIO") == "1" self$hyperlinks <- cli::ansi_hyperlink_types()[["run"]] }, # To be used when the reporter needs to produce output inside of an active # test, which is almost always from $add_result() local_user_output = function(.env = parent.frame()) { local_reproducible_output( width = self$width, crayon = self$crayon, rstudio = self$rstudio, hyperlinks = self$hyperlinks, .env = .env ) # Can't set unicode with local_reproducible_output() because it can # generate a skip if you're temporarily using a non-UTF-8 locale withr::local_options(cli.unicode = self$unicode, .local_envir = .env) }, cat_tight = function(...) { cat(..., sep = "", file = self$out, append = TRUE) }, cat_line = function(...) { cli::cat_line(..., file = self$out) }, rule = function(...) { cli::cat_rule(..., file = self$out) }, # The hierarchy of contexts are implied - a context starts with a # call to context(), and ends either with the end of the file, or # with the next call to context() in the same file. These private # methods paper over the details so that context appear to work # in the same way as tests and expectations. .context = NULL, .start_context = function(context) { if (!is.null(self$.context)) { self$end_context(self$.context) } self$.context <- context self$start_context(context) invisible() }, end_context_if_started = function(context) { if (!is.null(self$.context)) { self$end_context(self$.context) self$.context <- NULL } invisible() } ) ) #' Determine default reporters #' #' @description #' These three functions are used to determine the default reporters used #' for `test_dir()`, `test_file()`, and `test_package()`: #' #' * `default_reporter()` returns the default reporter for [test_dir()]. #' If `parallel` is `TRUE`, it uses [ParallelProgressReporter], which you #' can override with option `testthat.default_parallel_reporter`. #' If `parallel` is `FALSE`, it uses [ProgressReporter], which you #' can override with option `testthat.default_reporter`. 
#' #' * `default_compact_reporter()` returns the default reporter for #' [test_file()]. It defaults to [CompactProgressReporter], which you can #' override with the `testthat.default_compact_reporter` option. #' #' * `check_reporter()` returns the default reporter for [test_package()]. #' It defaults to [CheckReporter], which you can override with the #' `testthat.default_check_reporter` option. #' #' Both `default_reporter()` and `default_compact_reporter()` will use #' [LlmReporter] if it appears that the tests are being run by a coding agent. #' #' @param parallel If `TRUE`, return a reporter suitable for parallel testing. #' @export #' @keywords internal default_reporter <- function(parallel = FALSE) { if (is_llm()) { "Llm" } else if (parallel) { getOption("testthat.default_parallel_reporter", "ParallelProgress") } else { getOption("testthat.default_reporter", "Progress") } } #' @export #' @rdname default_reporter default_compact_reporter <- function() { if (is_llm()) { "Llm" } else { getOption("testthat.default_compact_reporter", "CompactProgress") } } #' @export #' @rdname default_reporter check_reporter <- function() { getOption("testthat.default_check_reporter", "Check") } testthat/R/reporter-multi.R0000644000176200001440000000327615053661631015446 0ustar liggesusers#' Run multiple reporters at the same time #' #' This reporter is useful to use several reporters at the same time, e.g. #' adding a custom reporter without removing the current one. 
#' #' @export #' @family reporters MultiReporter <- R6::R6Class( "MultiReporter", inherit = Reporter, public = list( reporters = list(), initialize = function(reporters = list()) { super$initialize() self$capabilities$parallel_support <- TRUE self$reporters <- reporters }, start_reporter = function() { o_apply(self$reporters, "start_reporter") }, start_file = function(filename) { o_apply(self$reporters, "start_file", filename) }, start_context = function(context) { o_apply(self$reporters, "start_context", context) }, start_test = function(context, test) { o_apply(self$reporters, "start_test", context, test) }, add_result = function(context, test, result) { o_apply( self$reporters, "add_result", context = context, test = test, result = result ) }, end_test = function(context, test) { o_apply(self$reporters, "end_test", context, test) }, end_context = function(context) { o_apply(self$reporters, "end_context", context) }, end_reporter = function() { o_apply(self$reporters, "end_reporter") }, end_file = function() { o_apply(self$reporters, "end_file") }, update = function() { o_apply(self$reporters, "update") } ) ) o_apply <- function(objects, method, ...) { x <- NULL # silence check note f <- new_function( exprs(x = ), expr( `$`(x, !!method)(...) ) ) lapply(objects, f) } testthat/R/snapshot.R0000644000176200001440000003246715127561732014322 0ustar liggesusers#' Do you expect this code to run the same way as last time? #' #' @description #' Snapshot tests (aka golden tests) are similar to unit tests except that the #' expected result is stored in a separate file that is managed by testthat. #' Snapshot tests are useful for when the expected value is large, or when #' the intent of the code is something that can only be verified by a human #' (e.g. this is a useful error message). Learn more in #' `vignette("snapshotting")`. 
#' #' `expect_snapshot()` runs code as if you had executed it at the console, and #' records the results, including output, messages, warnings, and errors. #' If you just want to compare the result, try [expect_snapshot_value()]. #' #' @section Workflow: #' The first time that you run a snapshot expectation it will run `x`, #' capture the results, and record them in `tests/testthat/_snaps/{test}.md`. #' Each test file gets its own snapshot file, e.g. `test-foo.R` will get #' `_snaps/foo.md`. #' #' It's important to review the Markdown files and commit them to git. They are #' designed to be human readable, and you should always review new additions #' to ensure that the salient information has been captured. They should also #' be carefully reviewed in pull requests, to make sure that snapshots have #' updated in the expected way. #' #' On subsequent runs, the result of `x` will be compared to the value stored #' on disk. If it's different, the expectation will fail, and a new file #' `_snaps/{test}.new.md` will be created. If the change was deliberate, #' you can approve the change with [snapshot_accept()] and then the tests will #' pass the next time you run them. #' #' Note that snapshotting can only work when executing a complete test file #' (with [test_file()], [test_dir()], or friends) because there's otherwise #' no way to figure out the snapshot path. If you run snapshot tests #' interactively, they'll just display the current value. #' #' @param x Code to evaluate. #' @param cran Should these expectations be verified on CRAN? By default, #' they are not, because snapshot tests tend to be fragile because they #' often rely on minor details of dependencies. #' @param error Do you expect the code to throw an error? The expectation #' will fail (even on CRAN) if an unexpected error is thrown or the #' expected error is not thrown. 
#' @param variant If non-`NULL`, results will be saved in #' `_snaps/{variant}/{test.md}`, so `variant` must be a single string #' suitable for use as a directory name. #' #' You can use variants to deal with cases where the snapshot output varies #' and you want to capture and test the variations. Common use cases include #' variations for operating system, R version, or version of key dependency. #' Variants are an advanced feature. When you use them, you'll need to #' carefully think about your testing strategy to ensure that all important #' variants are covered by automated tests, and ensure that you have a way #' to get snapshot changes out of your CI system and back into the repo. #' #' Note that there's no way to declare all possible variants up front which #' means that as soon as you start using variants, you are responsible for #' deleting snapshot variants that are no longer used. (testthat will still #' delete all variants if you delete the test.) #' @param transform Optionally, a function to scrub sensitive or stochastic #' text from the output. Should take a character vector of lines as input #' and return a modified character vector as output. #' @param cnd_class Whether to include the class of messages, #' warnings, and errors in the snapshot. Only the most specific #' class is included, i.e. the first element of `class(cnd)`. 
#' @export expect_snapshot <- function( x, cran = FALSE, error = FALSE, transform = NULL, variant = NULL, cnd_class = FALSE ) { edition_require(3, "expect_snapshot()") x <- enquo0(x) expect_snapshot_( x, cran = cran, error = error, transform = transform, variant = variant, cnd_class = cnd_class ) } expect_snapshot_ <- function( x, cran = TRUE, error = FALSE, error_class = NULL, transform = NULL, variant = NULL, cnd_class = FALSE, error_frame = caller_env() ) { check_bool(cran, call = error_frame) check_bool(error, call = error_frame) check_bool(cnd_class, call = error_frame) variant <- check_variant(variant) if (!is.null(transform)) { transform <- as_function(transform) } # Execute code, capturing last error state <- new_environment(list(error = NULL)) replay <- function(x) { snapshot_replay( x, state, transform = transform, cnd_class = cnd_class ) } with_is_snapshotting( out <- verify_exec(quo_get_expr(x), quo_get_env(x), replay) ) # Use expect_error() machinery to confirm that error is as expected msg <- compare_condition_3e( cond_type = "error", cond_class = error_class, cond = state$error, lab = quo_label(x), expected = error ) if (!is.null(msg)) { if (error) { fail(msg, trace = state$error[["trace"]]) } else { # This might be a failed expectation, so we need to make sure # that we can muffle it withRestarts( cnd_signal(state$error), muffle_expectation = function() NULL ) } return() } expect_snapshot_helper( "code", out, cran = cran, save = function(x) paste0(x, collapse = "\n"), load = function(x) split_by_line(x)[[1]], variant = variant, trace_env = error_frame ) } snapshot_replay <- function(x, state, ..., transform = NULL) { UseMethod("snapshot_replay", x) } #' @export snapshot_replay.character <- function(x, state, ..., transform = NULL) { c(snap_header(state, "Output"), snapshot_lines(x, transform)) } #' @export snapshot_replay.source <- function(x, state, ..., transform = NULL) { c(snap_header(state, "Code"), snapshot_lines(x$src)) } #' @export 
snapshot_replay.condition <- function( x, state, ..., transform = NULL, cnd_class = FALSE ) { cnd_message <- env_get(ns_env("rlang"), "cnd_message") if (inherits(x, "message")) { msg <- cnd_message(x) type <- "Message" } else { if (inherits(x, "error")) { state$error <- x } msg <- cnd_message(x, prefix = TRUE) type <- "Condition" } if (cnd_class) { type <- paste0(type, " <", error_class(x), ">") } c(snap_header(state, type), snapshot_lines(msg, transform)) } error_class <- function(x) { # If error was entraced from base R error, use original error class # This is a little fragile because entrace() does not document this behaviour if (inherits(x, "rlang_error") && !is.null(x$error)) { x <- x$error } class(x)[[1]] } snapshot_lines <- function(x, transform = NULL) { x <- split_lines(x) if (!is.null(transform)) { x <- transform(x) } x <- indent(x) x } add_implicit_nl <- function(x) { if (substr(x, nchar(x), nchar(x)) == "\n") { x } else { paste0(x, "\n") } } snap_header <- function(state, header) { if (!identical(state$header, header)) { state$header <- header header } } #' Snapshot helpers #' #' @description #' `r lifecycle::badge("questioning")` #' #' These snapshotting functions are questioning because they were developed #' before [expect_snapshot()] and we're not sure that they still have a #' role to play. #' #' * `expect_snapshot_output()` captures just output printed to the console. #' * `expect_snapshot_error()` captures an error message and #' optionally checks its class. #' * `expect_snapshot_warning()` captures a warning message and #' optionally checks its class. 
#' #' @inheritParams expect_snapshot #' @keywords internal #' @export expect_snapshot_output <- function(x, cran = FALSE, variant = NULL) { check_bool(cran) edition_require(3, "expect_snapshot_output()") variant <- check_variant(variant) lab <- quo_label(enquo(x)) with_is_snapshotting( val <- capture_output_lines(x, print = TRUE, width = NULL) ) expect_snapshot_helper( lab, val, cran = cran, save = function(x) paste0(x, collapse = "\n"), load = function(x) split_by_line(x)[[1]], variant = variant, trace_env = caller_env() ) } #' @param class Class of expected error or warning. The expectation will #' always fail (even on CRAN) if an error of this class isn't seen #' when executing `x`. #' @export #' @rdname expect_snapshot_output expect_snapshot_error <- function( x, class = "error", cran = FALSE, variant = NULL ) { check_string(class) check_bool(cran) edition_require(3, "expect_snapshot_error()") expect_snapshot_condition_( "error", {{ x }}, class = class, cran = cran, variant = variant ) } #' @export #' @rdname expect_snapshot_output expect_snapshot_warning <- function( x, class = "warning", cran = FALSE, variant = NULL ) { check_string(class) check_bool(cran) edition_require(3, "expect_snapshot_warning()") expect_snapshot_condition_( "warning", {{ x }}, class = class, cran = cran, variant = variant ) } expect_snapshot_condition_ <- function( base_class, x, class = base_class, cran = FALSE, variant = NULL, trace_env = caller_env() ) { variant <- check_variant(variant) lab <- quo_label(enquo(x)) with_is_snapshotting( val <- capture_matching_condition(x, cnd_matcher(class)) ) if (is.null(val)) { if (base_class == class) { msg <- sprintf("%s did not generate %s", lab, base_class) } else { msg <- sprintf( "%s did not generate %s with class '%s'", lab, base_class, class ) } return(snapshot_fail(msg, trace_env = trace_env)) } expect_snapshot_helper( lab, conditionMessage(val), cran = cran, variant = variant, trace_env = trace_env ) } expect_snapshot_helper <- function( 
lab, val, cran = FALSE, save = identity, load = identity, ..., tolerance = testthat_tolerance(), variant = NULL, trace_env = caller_env() ) { if (!cran && on_cran()) { signal_snapshot_on_cran() return(invisible()) } snapshotter <- get_snapshotter() if (is.null(snapshotter)) { snapshot_not_available(save(val)) return(invisible()) } comp <- snapshotter$take_snapshot( val, save = save, load = load, ..., tolerance = tolerance, variant = variant, trace_env = trace_env ) if (inherits(comp, "expectation_failure")) { return(comp) } if (!identical(variant, "_default")) { variant_lab <- paste0(" (variant '", variant, "')") } else { variant_lab <- "" } if (length(comp) != 0) { hint <- snapshot_hint(snapshotter$file) msg <- c( sprintf("Snapshot of %s has changed%s:", lab, variant_lab), comp, hint ) snapshot_fail(msg, trace_env = trace_env) } else { pass() } invisible() } snapshot_hint <- function(id, show_accept = TRUE, reset_output = TRUE) { if (in_check_reporter()) { return("") } if (reset_output) { local_reporter_output() } full_name <- paste0(id, collapse = "/") args <- c(full_name, snapshot_hint_path()) args <- encodeString(args, quote = '"') args <- paste0(args, collapse = ", ") accept_link <- cli::format_inline("{.run testthat::snapshot_accept({args})}") review_link <- cli::format_inline("{.run testthat::snapshot_review({args})}") out <- c( if (show_accept) sprintf("* Run %s to accept the change.", accept_link), sprintf("* Run %s to review the change.", review_link) ) structure(out, class = "testthat_hint") } # Include path argument if we're in a different working directory snapshot_hint_path <- function() { wd <- Sys.getenv("TESTTHAT_WD", unset = "") if (wd == "") { return() } test_path <- file.path(wd, "tests/testthat") if (test_path == getwd()) { return() } old <- normalizePath(wd) new <- normalizePath(getwd()) if (startsWith(new, old)) { substr(new, nchar(old) + 2, nchar(new)) } else { new } } #' @export print.testthat_hint <- function(x, ...) 
{ cat(paste0(x, "\n", collapse = "")) invisible(x) } snapshot_not_available <- function(message) { local_reporter_output() cat(cli::rule("Snapshot"), "\n", sep = "") cli::cli_inform(c( i = "Can't save or compare to reference when testing interactively." )) cat(message, "\n", sep = "") cat(cli::rule(), "\n", sep = "") } local_snapshot_dir <- function(snap_names, .env = parent.frame()) { path <- withr::local_tempdir(.local_envir = .env) dir.create(file.path(path, "_snaps"), recursive = TRUE) dirs <- setdiff(unique(dirname(snap_names)), ".") for (dir in dirs) { dir.create( file.path(path, "_snaps", dir), recursive = TRUE, showWarnings = FALSE ) } snap_paths <- file.path(path, "_snaps", snap_names) lapply(snap_paths, brio::write_lines, text = "") path } # if transform() wiped out the full message, don't indent, #1487 indent <- function(x) if (length(x)) paste0(" ", x) else x check_variant <- function(x, call = caller_env()) { if (is.null(x)) { "_default" } else if (is_string(x)) { x } else { cli::cli_abort("If supplied, {.arg variant} must be a string.", call = call) } } with_is_snapshotting <- function(code) { withr::local_envvar(TESTTHAT_IS_SNAPSHOT = "true") code } signal_snapshot_on_cran <- function() { withRestarts( signal(class = "snapshot_on_cran"), muffle_cran_snapshot = function() {} ) } testthat/R/praise.R0000644000176200001440000000310115056632045013722 0ustar liggesusers# nocov start praise <- function() { plain <- c( "You rock!", "You are a coding rockstar!", "Keep up the good work.", "Woot!", "Way to go!", "Nice code.", praise::praise("Your tests are ${adjective}!"), praise::praise("${EXCLAMATION} - ${adjective} code.") ) utf8 <- c( "\U0001f600", # smile "\U0001f973", # party face "\U0001f638", # cat grin paste0(strrep("\U0001f389\U0001f38a", 5), "\U0001f389"), "\U0001f485 Your tests are beautiful \U0001f485", "\U0001f947 Your tests deserve a gold medal \U0001f947", "\U0001f308 Your tests are over the rainbow \U0001f308", "\U0001f9ff Your tests look 
perfect \U0001f9ff", "\U0001f3af Your tests hit the mark \U0001f3af", "\U0001f41d Your tests are the bee's knees \U0001f41d", "\U0001f3b8 Your tests rock \U0001f3b8", "\U0001f44f Your tests get an ovation \U0001f44f" ) x <- if (cli::is_utf8_output()) c(plain, utf8) else plain sample(x, 1) } praise_emoji <- function() { if (!cli::is_utf8_output()) { return("") } emoji <- c( "\U0001f600", # smile "\U0001f973", # party face "\U0001f638", # cat grin "\U0001f308", # rainbow "\U0001f947", # gold medal "\U0001f389", # party popper "\U0001f38a" # confetti ball ) paste0(" ", sample(emoji, 1)) } encourage <- function() { x <- c( "Keep trying!", "Don't worry, you'll get it.", "No one is perfect!", "No one gets it right on their first try", "Frustration is a natural part of programming :)", "I believe in you!" ) sample(x, 1) } # nocov end testthat/R/parallel.R0000644000176200001440000003251515127561732014251 0ustar liggesusers# /-----------------------------\ /-------------------------------\ # | Main R process | | Subprocess 1 | # | +------------------------+ | | +---------------------------+ | # | | test_dir_parallel() | | | | test_file() | | # | | +-------------------+ | | | | +-----------------------+ | | # | | | Event loop |< ------+ | | | SubprocessReporter | | | # | | +-------------------+ | | | | | | +-------------------+ | | | # | | | | | | | | | | test_that() | | | | # | | v | | | | | | +-------------------+ | | | # | | +-------------------+ | | | | | | | | | | # | | | Progress2Reporter | | | | | | | v | | | # | | +-------------------+ | | | | | | +-------------------+ | | | # | +------------------------+ | |--------| signalCondition() | | | | # \-----------------------------/ | | | | +-------------------+ | | | # | | | +-----------------------+ | | # | | +---------------------------+ | # | \-------------------------------/ # | /-------------------------------\ # |--| Subprocess 2 | # | \-------------------------------/ # | /-------------------------------\ # \--| 
Subprocess 3 | # \-------------------------------/ # # ## Notes # # * Subprocesses run `callr::r_session` R sessions. They are re-used, # one R session can be used for several test_file() calls. # * Helper and setup files are loaded in the subprocesses after this. # * The main process puts all test files in the task queue, and then # runs an event loop. test_files_parallel <- function( test_dir, test_package, test_paths, load_helpers = TRUE, reporter = default_reporter(TRUE), env = NULL, stop_on_failure = FALSE, stop_on_warning = FALSE, wrap = TRUE, # unused, to match test_files signature load_package = c("none", "installed", "source"), shuffle = FALSE ) { # TODO: support timeouts. 20-30s for each file by default? num_workers <- min(default_num_cpus(), length(test_paths)) cli::cli_inform("Starting {num_workers} test process{?es}.") # Set up work queue ------------------------------------------ queue <- NULL withr::defer(queue_teardown(queue)) # Start workers in parallel and add test tasks to queue. 
queue <- queue_setup( test_paths = test_paths, test_package = test_package, test_dir = test_dir, load_helpers = load_helpers, num_workers = num_workers, load_package = load_package, shuffle = shuffle ) withr::with_dir(test_dir, { reporters <- test_files_reporter(reporter, "parallel") with_reporter(reporters$multi, { parallel_updates <- reporter$capabilities$parallel_updates if (parallel_updates) { parallel_event_loop_smooth(queue, reporters, ".") } else { parallel_event_loop_chunky(queue, reporters, ".") } }) test_files_check( reporters$list$get_results(), stop_on_failure = stop_on_failure, stop_on_warning = stop_on_warning ) }) } default_num_cpus <- function() { # Use common option, if set ncpus <- getOption("Ncpus", NULL) if (!is.null(ncpus)) { ncpus <- suppressWarnings(as.integer(ncpus)) if (is.na(ncpus)) { cli::cli_abort( "{.code getOption('Ncpus')} must be an integer.", call = NULL ) } return(ncpus) } # Otherwise use env var if set ncpus <- Sys.getenv("TESTTHAT_CPUS", "") if (ncpus != "") { ncpus <- suppressWarnings(as.integer(ncpus)) if (is.na(ncpus)) { cli::cli_abort("{.envvar TESTTHAT_CPUS} must be an integer.") } return(ncpus) } # Otherwise 2 2L } parallel_event_loop_smooth <- function(queue, reporters, test_dir) { update_interval <- 0.1 next_update <- proc.time()[[3]] + update_interval while (!queue$is_idle()) { # How much time do we have to poll before the next UI update? 
now <- proc.time()[[3]] poll_time <- max(next_update - now, 0) next_update <- now + update_interval msgs <- queue$poll(poll_time) updated <- FALSE for (x in msgs) { if (x$code == PROCESS_OUTPUT) { lns <- paste0("> ", x$path, ": ", x$message) cat("\n", file = stdout()) base::writeLines(lns, stdout()) next } if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { cli::cli_inform(as.character(m)) next } if (m$cmd != "DONE") { reporters$multi$start_file(m$filename) reporters$multi$start_test(m$context, m$test) if (m$type == "snapshotter") { snapshotter <- getOption("testthat.snapshotter") do.call(snapshotter[[m$cmd]], m$args) } else { do.call(reporters$multi[[m$cmd]], m$args) updated <- TRUE } } } # We need to spin, even if there were no events if (!updated) { reporters$multi$update() } } } parallel_event_loop_chunky <- function(queue, reporters, test_dir) { files <- list() while (!queue$is_idle()) { msgs <- queue$poll(Inf) for (x in msgs) { if (x$code == PROCESS_OUTPUT) { lns <- paste0("> ", x$path, ": ", x$message) base::writeLines(lns, stdout()) next } if (x$code != PROCESS_MSG) { next } m <- x$message if (!inherits(m, "testthat_message")) { cli::cli_inform(as.character(m)) next } # Record all events until we get end of file, then we replay them all # with the local reporters. This prevents out of order reporting. 
if (m$cmd != "DONE") { files[[m$filename]] <- append(files[[m$filename]], list(m)) } else { replay_events(reporters$multi, files[[m$filename]]) reporters$multi$end_context_if_started() files[[m$filename]] <- NULL } } } } replay_events <- function(reporter, events) { snapshotter <- getOption("testthat.snapshotter") for (m in events) { if (m$type == "snapshotter") { do.call(snapshotter[[m$cmd]], m$args) } else { do.call(reporter[[m$cmd]], m$args) } } } queue_setup <- function( test_paths, test_package, test_dir, num_workers, load_helpers, load_package, shuffle = FALSE ) { # TODO: observe `load_package`, but the "none" default is not # OK for the subprocess, because it'll not have the tested package if (load_package == "none") { load_package <- "source" } # TODO: similarly, load_helpers = FALSE, coming from devtools, # is not appropriate in the subprocess load_helpers <- TRUE test_package <- test_package %||% Sys.getenv("TESTTHAT_PKG") hyperlinks <- cli::ansi_has_hyperlink_support() # First we load the package "manually", in case it is testthat itself load_hook <- expr({ switch( !!load_package, installed = library(!!test_package, character.only = TRUE), source = pkgload::load_all(!!test_dir, helpers = FALSE, quiet = TRUE) ) # Ensure snapshot can generate hyperlinks for snapshot_accept() options( cli.hyperlink = !!hyperlinks, cli.hyperlink_run = !!hyperlinks ) asNamespace("testthat")$queue_process_setup( test_package = !!test_package, test_dir = !!test_dir, load_helpers = !!load_helpers, load_package = "none" ) }) queue <- task_q$new(concurrency = num_workers, load_hook = load_hook) fun <- transport_fun(function(path, shuffle) { asNamespace("testthat")$queue_task(path, shuffle) }) for (path in test_paths) { queue$push(fun, list(path, shuffle)) } queue } queue_process_setup <- function( test_package, test_dir, load_helpers, load_package ) { env <- asNamespace("testthat")$test_files_setup_env( test_package, test_dir, load_package ) 
asNamespace("testthat")$test_files_setup_state( test_dir = test_dir, test_package = test_package, load_helpers = load_helpers, env = env, frame = .GlobalEnv ) # record testing env for mocks & queue_task # manual implementation of local_testing_env() the$testing_env <- env } queue_task <- function(path, shuffle = FALSE) { withr::local_envvar("TESTTHAT_IS_PARALLEL" = "true") snapshotter <- SubprocessSnapshotReporter$new(snap_dir = "_snaps") withr::local_options(testthat.snapshotter = snapshotter) reporters <- list( SubprocessReporter$new(), snapshotter ) multi <- MultiReporter$new(reporters = reporters) with_reporter( multi, test_one_file(path, env = the$testing_env, shuffle = shuffle) ) NULL } # Clean up subprocesses: we call teardown methods, but we only give them a # second, before killing the whole process tree using ps's env var marker # method. queue_teardown <- function(queue) { if (is.null(queue)) { return() } tasks <- queue$list_tasks() num <- nrow(tasks) # calling quit() here creates a race condition, and the output of # the deferred_run() might be lost. Instead we close the input # connection in a separate task. clean_fn <- function() { withr::deferred_run(.GlobalEnv) } topoll <- integer() for (i in seq_len(num)) { if ( !is.null(tasks$worker[[i]]) && tasks$worker[[i]]$get_state() == "idle" ) { # The worker might have crashed or exited, so this might fail. 
# If it does then we'll just ignore that worker tryCatch( { tasks$worker[[i]]$call(clean_fn) topoll <- c(topoll, i) }, error = function(e) NULL ) } } # Give covr a bit more time if (in_covr()) { grace <- 30L } else { grace <- 1L } first_error <- NULL limit <- Sys.time() + grace while (length(topoll) > 0 && (timeout <- limit - Sys.time()) > 0) { timeout <- as.double(timeout, units = "secs") * 1000 conns <- lapply(tasks$worker[topoll], function(x) x$get_poll_connection()) pr <- unlist(processx::poll(conns, as.integer(timeout))) for (i in which(pr == "ready")) { msg <- tasks$worker[[topoll[i]]]$read() first_error <- first_error %||% msg$error } topoll <- topoll[pr != "ready"] } topoll <- integer() for (i in seq_len(num)) { if ( !is.null(tasks$worker[[i]]) && tasks$worker[[i]]$get_state() == "idle" ) { tryCatch( { close(tasks$worker[[i]]$get_input_connection()) topoll <- c(topoll, i) }, error = function(e) NULL ) } } limit <- Sys.time() + grace while (length(topoll) > 0 && (timeout <- limit - Sys.time()) > 0) { timeout <- as.double(timeout, units = "secs") * 1000 conns <- lapply(tasks$worker[topoll], function(x) x$get_poll_connection()) pr <- unlist(processx::poll(conns, as.integer(timeout))) topoll <- topoll[pr != "ready"] } for (i in seq_len(num)) { if (!is.null(tasks$worker[[i]])) { if (ps::ps_is_supported()) { tryCatch(tasks$worker[[i]]$kill_tree(), error = function(e) NULL) } else { tryCatch(tasks$worker[[i]]$kill(), error = function(e) NULL) } } } if (!is.null(first_error)) { cli::cli_abort( "At least one parallel worker failed to run teardown", parent = first_error ) } } # Reporter that just forwards events in the subprocess back to the main process # # Ideally, these messages would be throttled, i.e. if the test code # emits many expectation conditions fast, SubprocessReporter should # collect several of them and only emit a condition a couple of times # a second. End-of-test and end-of-file events would be transmitted # immediately. 
SubprocessReporter <- R6::R6Class( "SubprocessReporter", inherit = Reporter, public = list( start_file = function(filename) { private$filename <- filename private$event("start_file", filename) }, start_test = function(context, test) { private$context <- context private$test <- test private$event("start_test", context, test) }, start_context = function(context) { private$context <- context private$event("start_context", context) }, add_result = function(context, test, result) { if (inherits(result, "expectation_success")) { # Strip bulky components to reduce data transfer cost result[["srcref"]] <- NULL result[["trace"]] <- NULL } private$event("add_result", context, test, result) }, end_test = function(context, test) { private$event("end_test", context, test) }, end_context = function(context) { private$event("end_context", context) }, end_file = function() { private$event("end_file") }, end_reporter = function() { private$event("DONE") } ), private = list( filename = NULL, context = NULL, test = NULL, event = function(cmd, ...) { msg <- list( code = PROCESS_MSG, type = "reporter", cmd = cmd, filename = private$filename, context = private$context, test = private$test, time = proc.time()[[3]], args = list(...) 
) class(msg) <- c("testthat_message", "callr_message", "condition") signalCondition(msg) } ) ) testthat/R/colour-text.R0000644000176200001440000000107015047715224014730 0ustar liggesuserscolourise <- function( text, as = c("success", "skip", "warning", "failure", "error") ) { if (has_colour()) { unclass(cli::make_ansi_style(testthat_style(as))(text)) } else { text } } has_colour <- function() { isTRUE(getOption("testthat.use_colours", TRUE)) && cli::num_ansi_colors() > 1 } testthat_style <- function( type = c("success", "skip", "warning", "failure", "error") ) { type <- match.arg(type) c( success = "green", skip = "blue", warning = "magenta", failure = "orange", error = "orange" )[[type]] } testthat/R/expect-named.R0000644000176200001440000000374515072252215015022 0ustar liggesusers#' Do you expect a vector with (these) names? #' #' You can either check for the presence of names (leaving `expected` #' blank), specific names (by supplying a vector of names), or absence of #' names (with `NULL`). #' #' @inheritParams expect_that #' @param expected Character vector of expected names. Leave missing to #' match any names. Use `NULL` to check for absence of names. #' @param ignore.order If `TRUE`, sorts names before comparing to #' ignore the effect of order. #' @param ignore.case If `TRUE`, lowercases all names to ignore the #' effect of case. 
#' @family expectations #' @export #' @examples #' x <- c(a = 1, b = 2, c = 3) #' expect_named(x) #' expect_named(x, c("a", "b", "c")) #' #' # Use options to control sensitivity #' expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE) #' #' # Can also check for the absence of names with NULL #' z <- 1:4 #' expect_named(z, NULL) expect_named <- function( object, expected, ignore.order = FALSE, ignore.case = FALSE, info = NULL, label = NULL ) { check_bool(ignore.order) check_bool(ignore.case) act <- quasi_label(enquo(object), label) if (missing(expected)) { act_names <- names(act$val) if (is.null(act_names)) { msg <- sprintf("Expected %s to have names.", act$lab) fail(msg) } else { pass() } } else { exp <- quasi_label(enquo(expected), arg = "expected") exp$val <- normalise_names(exp$val, ignore.order, ignore.case) act_names <- labelled_value( normalise_names(names(act$val), ignore.order, ignore.case), act$lab ) msg <- "Expected %s to have names %s." if (ignore.order) { expect_setequal_(msg, act_names, exp) } else { expect_waldo_equal_(msg, act_names, exp) } } invisible(act$val) } normalise_names <- function(x, ignore.order = FALSE, ignore.case = FALSE) { if (is.null(x)) { return() } if (ignore.order) { x <- sort(x) } if (ignore.case) { x <- tolower(x) } x } testthat/R/snapshot-manage.R0000644000176200001440000001403015054335451015526 0ustar liggesusers#' Accept or reject modified snapshots #' #' * `snapshot_accept()` accepts all modified snapshots. #' * `snapshot_reject()` rejects all modified snapshots by deleting the `.new` variants. #' * `snapshot_review()` opens a Shiny app that shows a visual diff of each #' modified snapshot. This is particularly useful for whole file snapshots #' created by `expect_snapshot_file()`. #' #' @param files Optionally, filter effects to snapshots from specified files. #' This can be a snapshot name (e.g. `foo` or `foo.md`), a snapshot file name #' (e.g. `testfile/foo.txt`), or a snapshot file directory (e.g. 
`testfile/`). #' #' @param path Path to tests. #' @export snapshot_accept <- function(files = NULL, path = "tests/testthat") { changed <- snapshot_meta(files, path) if (nrow(changed) == 0) { cli::cli_inform("No snapshots to update.") return(invisible()) } cli::cli_inform("Updating snapshots: {.path {changed$name}}.") unlink(changed$cur) file.rename(changed$new, changed$cur) rstudio_tickle() invisible() } #' @rdname snapshot_accept #' @export snapshot_reject <- function(files = NULL, path = "tests/testthat") { changed <- snapshot_meta(files, path) if (nrow(changed) == 0) { inform("No snapshots to reject") return(invisible()) } inform(c("Rejecting snapshots:", changed$name)) unlink(changed$new) rstudio_tickle() invisible() } #' @rdname snapshot_accept #' @param ... Additional arguments passed on to [shiny::runApp()]. #' @export snapshot_review <- function(files = NULL, path = "tests/testthat", ...) { check_installed(c("shiny", "diffviewer"), "to use snapshot_review()") changed <- snapshot_meta(files, path) if (nrow(changed) == 0) { cli::cli_inform("No snapshots to update.") return(invisible()) } review_app(changed$name, changed$cur, changed$new, ...) rstudio_tickle() invisible() } review_app <- function(name, old_path, new_path, ...) 
{ stopifnot( length(name) == length(old_path), length(old_path) == length(new_path) ) n <- length(name) case_index <- stats::setNames(seq_along(name), name) handled <- rep(FALSE, n) ui <- shiny::fluidPage( style = "margin: 0.5em", shiny::fluidRow( style = "display: flex; margin-bottom: 0.5em", shiny::div( style = "flex: 1 1", if (n > 1) shiny::selectInput("cases", NULL, case_index, width = "100%") ), shiny::div( class = "btn-group", style = "margin-left: 1em; flex: 0 0 auto", shiny::actionButton("reject", "Reject", class = "btn-danger"), if (n > 1) shiny::actionButton("skip", "Skip"), shiny::actionButton("accept", "Accept", class = "btn-success"), ) ), shiny::fluidRow( diffviewer::visual_diff_output("diff") ) ) server <- function(input, output, session) { i <- shiny::reactive(if (n == 1) 1L else as.numeric(input$cases)) output$diff <- diffviewer::visual_diff_render({ diffviewer::visual_diff(old_path[[i()]], new_path[[i()]]) }) # Can't skip if there's only one file to review shiny::updateActionButton(session, "skip", disabled = (n <= 1)) # Handle buttons - after clicking update move input$cases to next case, # and remove current case (for accept/reject). 
If no cases left, close app shiny::observeEvent(input$reject, { cli::cli_inform("Rejecting snapshot: {.path {new_path[[i()]]}}.") unlink(new_path[[i()]]) update_cases() }) shiny::observeEvent(input$accept, { cli::cli_inform("Accepting snapshot: {.path {old_path[[i()]]}}.") file.rename(new_path[[i()]], old_path[[i()]]) update_cases() }) shiny::observeEvent(input$skip, { i <- next_case() shiny::updateSelectInput(session, "cases", selected = i) }) update_cases <- function() { handled[[i()]] <<- TRUE i <- next_case() shiny::updateSelectInput( session, "cases", choices = case_index[!handled], selected = i ) n_left <- sum(!handled) # Disable skip button if only one case remains shiny::updateActionButton(session, "skip", disabled = (n_left <= 1)) } next_case <- function() { if (all(handled)) { cli::cli_inform("Review complete.") shiny::stopApp() return() } # Find next case; remaining <- case_index[!handled] next_cases <- which(remaining > i()) if (length(next_cases) == 0) { remaining[[1]] } else { remaining[[next_cases[[1]]]] } } } cli::cli_inform(c( "Starting Shiny app for snapshot review.", i = "Use {.kbd Ctrl + C} to quit." )) shiny::runApp( shiny::shinyApp(ui, server), quiet = TRUE, launch.browser = shiny::paneViewer(), ... 
) invisible() } # helpers ----------------------------------------------------------------- snapshot_meta <- function(files = NULL, path = "tests/testthat") { all <- dir(file.path(path, "_snaps"), recursive = TRUE, full.names = TRUE) cur <- all[!grepl("\\.new\\.", all)] snap_file <- basename(dirname(cur)) != "_snaps" snap_test <- ifelse( snap_file, basename(dirname(cur)), gsub("\\.md$", "", basename(cur)) ) if (length(cur) == 0) { new <- character() } else { new <- paste0(tools::file_path_sans_ext(cur), ".new.", tools::file_ext(cur)) new[!file.exists(new)] <- NA } snap_name <- ifelse( snap_file, file.path(snap_test, basename(cur)), basename(cur) ) out <- data.frame( test = snap_test, name = snap_name, cur = cur, new = new, stringsAsFactors = FALSE ) out <- out[!is.na(out$new), , drop = FALSE] out <- out[order(out$test, out$cur), , drop = FALSE] rownames(out) <- NULL if (!is.null(files)) { is_dir <- substr(files, nchar(files), nchar(files)) == "/" dirs <- files[is_dir] files <- files[!is_dir] dirs <- substr(dirs, 1, nchar(dirs) - 1) # Match regardless of whether user include .md or not files <- c(files, paste0(files, ".md")) out <- out[out$name %in% files | out$test %in% dirs, , drop = FALSE] } out } testthat/R/expect-inheritance.R0000644000176200001440000002013015072252215016212 0ustar liggesusers#' Do you expect an S3/S4/R6/S7 object that inherits from this class? #' #' @description #' See for an overview of R's OO systems, and #' the vocabulary used here. #' #' * `expect_type(x, type)` checks that `typeof(x)` is `type`. #' * `expect_s3_class(x, class)` checks that `x` is an S3 object that #' [inherits()] from `class` #' * `expect_s3_class(x, NA)` checks that `x` isn't an S3 object. #' * `expect_s4_class(x, class)` checks that `x` is an S4 object that #' [is()] `class`. #' * `expect_s4_class(x, NA)` checks that `x` isn't an S4 object. #' * `expect_r6_class(x, class)` checks that `x` an R6 object that #' inherits from `class`. 
#' * `expect_s7_class(x, Class)` checks that `x` is an S7 object that #' [S7::S7_inherits()] from `Class` #' #' See [expect_vector()] for testing properties of objects created by vctrs. #' #' @param type String giving base type (as returned by [typeof()]). #' @param class The required type varies depending on the function: #' * `expect_type()`: a string. #' * `expect_s3_class()`: a string or character vector. The behaviour of #' multiple values (i.e. a character vector) is controlled by the #' `exact` argument. #' * `expect_s4_class()`: a string. #' * `expect_r6_class()`: a string. #' * `expect_s7_class()`: an [S7::S7_class()] object. #' #' For historical reasons, `expect_s3_class()` and `expect_s4_class()` also #' take `NA` to assert that the `object` is not an S3 or S4 object. #' @inheritParams expect_that #' @family expectations #' @examples #' x <- data.frame(x = 1:10, y = "x", stringsAsFactors = TRUE) #' # A data frame is an S3 object with class data.frame #' expect_s3_class(x, "data.frame") #' show_failure(expect_s4_class(x, "data.frame")) #' # A data frame is built from a list: #' expect_type(x, "list") #' #' f <- factor(c("a", "b", "c")) #' o <- ordered(f) #' #' # Using multiple class names tests if the object inherits from any of them #' expect_s3_class(f, c("ordered", "factor")) #' # Use exact = TRUE to test for exact match #' show_failure(expect_s3_class(f, c("ordered", "factor"), exact = TRUE)) #' expect_s3_class(o, c("ordered", "factor"), exact = TRUE) #' #' # An integer vector is an atomic vector of type "integer" #' expect_type(x$x, "integer") #' # It is not an S3 object #' show_failure(expect_s3_class(x$x, "integer")) #' #' # Above, we requested data.frame() converts strings to factors: #' show_failure(expect_type(x$y, "character")) #' expect_s3_class(x$y, "factor") #' expect_type(x$y, "integer") #' @name inheritance-expectations NULL #' @export #' @rdname inheritance-expectations expect_type <- function(object, type) { check_string(type) act <- 
quasi_label(enquo(object)) act_type <- typeof(act$val) if (!identical(act_type, type)) { fail(c( sprintf("Expected %s to have type %s.", act$lab, format_class(type)), sprintf("Actual type: %s", format_class(act_type)) )) } else { pass() } invisible(act$val) } #' @export #' @rdname inheritance-expectations #' @param exact If `FALSE`, the default, checks that `object` inherits #' from any element of `class`. If `TRUE`, checks that object has a class #' that exactly matches `class`. expect_s3_class <- function(object, class, exact = FALSE) { check_bool(exact) act <- quasi_label(enquo(object)) act$class <- format_class(class(act$val)) exp_lab <- format_class(class) if (identical(class, NA)) { if (isS3(object)) { fail(sprintf("Expected %s not to be an S3 object.", act$lab)) } else { pass() } } else if (is.character(class)) { if (!isS3(act$val)) { fail(c( sprintf("Expected %s to be an S3 object.", act$lab), sprintf("Actual OO type: %s.", oo_type(act$val)) )) } else if (exact && !identical(class(act$val), class)) { fail(c( sprintf("Expected %s to have class %s.", act$lab, exp_lab), sprintf("Actual class: %s.", act$class) )) } else if (!inherits(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, exp_lab), sprintf("Actual class: %s.", act$class) )) } else { pass() } } else { stop_input_type(class, c("a character vector", "NA")) } invisible(act$val) } #' @export #' @rdname inheritance-expectations expect_s4_class <- function(object, class) { act <- quasi_label(enquo(object)) act$class <- format_class(methods::is(act$val)) exp_lab <- format_class(class) if (identical(class, NA)) { if (isS4(object)) { fail(sprintf("Expected %s not to be an S4 object.", act$lab)) } else { pass() } } else if (is.character(class)) { if (!isS4(act$val)) { fail(c( sprintf("Expected %s to be an S4 object.", act$lab), sprintf("Actual OO type: %s.", oo_type(act$val)) )) } else if (!methods::is(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, 
exp_lab), sprintf("Actual class: %s.", act$class) )) } else { pass() } } else { stop_input_type(class, c("a character vector", "NA")) } invisible(act$val) } #' @export #' @rdname inheritance-expectations expect_r6_class <- function(object, class) { act <- quasi_label(enquo(object)) check_string(class) if (!inherits(act$val, "R6")) { fail(c( sprintf("Expected %s to be an R6 object.", act$lab), sprintf("Actual OO type: %s.", oo_type(act$val)) )) } else if (!inherits(act$val, class)) { act_class <- format_class(class(act$val)) exp_class <- format_class(class) fail(c( sprintf("Expected %s to inherit from %s.", act$lab, exp_class), sprintf("Actual class: %s.", act_class) )) } else { pass() } invisible(act$val) } #' @export #' @rdname inheritance-expectations expect_s7_class <- function(object, class) { check_installed("S7") if (!inherits(class, "S7_class")) { stop_input_type(class, "an S7 class object") } act <- quasi_label(enquo(object)) if (!S7::S7_inherits(object)) { fail(c( sprintf("Expected %s to be an S7 object.", act$lab), sprintf("Actual OO type: %s.", oo_type(act$val)) )) } else if (!S7::S7_inherits(object, class)) { exp_class <- attr(class, "name", TRUE) act_class <- setdiff(base::class(object), "S7_object") act_class_desc <- paste0("<", act_class, ">", collapse = "/") fail(c( sprintf("Expected %s to inherit from <%s>.", act$lab, exp_class), sprintf("Actual class: %s.", act_class_desc) )) } else { pass() } invisible(act$val) } #' Do you expect to inherit from this class? #' #' @description #' `r lifecycle::badge("superseded")` #' #' `expect_is()` is an older form that uses [inherits()] without checking #' whether `x` is S3, S4, or neither. Instead, I'd recommend using #' [expect_type()], [expect_s3_class()], or [expect_s4_class()] to more clearly #' convey your intent. #' #' @section 3rd edition: #' `r lifecycle::badge("deprecated")` #' #' `expect_is()` is formally deprecated in the 3rd edition. 
#' #' @keywords internal #' @param class Class name passed to `inherits()`. #' @inheritParams expect_type #' @export expect_is <- function(object, class, info = NULL, label = NULL) { check_character(class) edition_deprecate( 3, "expect_is()", "Use `expect_type()`, `expect_s3_class()`, or `expect_s4_class()` instead" ) act <- quasi_label(enquo(object), label) act$class <- format_class(class(act$val)) exp_lab <- format_class(class(class)) if (!inherits(act$val, class)) { msg <- sprintf( "Expected %s to inherit from %s.\nActual inheritance: %s", act$lab, exp_lab, act$class ) fail(msg, info = info) } else { pass() } invisible(act$val) } # Helpers ---------------------------------------------------------------------- isS3 <- function(x) is.object(x) && !isS4(x) format_class <- function(x) { paste0(encodeString(x, quote = '"'), collapse = "/") } oo_type <- function(x) { if (!is.object(x)) { return("none") } if (isS4(x)) { "S4" } else { if (inherits(x, "R6")) { "R6" } else if (inherits(x, "S7")) { "S7" } else { "S3" } } } testthat/R/expect-vector.R0000644000176200001440000000266515072252215015240 0ustar liggesusers#' Do you expect a vector with this size and/or prototype? #' #' `expect_vector()` is a thin wrapper around [vctrs::vec_assert()], converting #' the results of that function in to the expectations used by testthat. This #' means that it used the vctrs of `ptype` (prototype) and `size`. See #' details in #' #' @inheritParams expect_that #' @param ptype (Optional) Vector prototype to test against. Should be a #' size-0 (empty) generalised vector. #' @param size (Optional) Size to check for. 
#' @export #' @examplesIf requireNamespace("vctrs") #' expect_vector(1:10, ptype = integer(), size = 10) #' show_failure(expect_vector(1:10, ptype = integer(), size = 5)) #' show_failure(expect_vector(1:10, ptype = character(), size = 5)) expect_vector <- function(object, ptype = NULL, size = NULL) { check_installed("vctrs") check_number_whole(size, min = 0, allow_null = TRUE) act <- quasi_label(enquo(object)) # vec_assert() automatically adds backticks so we hack out the ones # added by as_label() act$lab <- gsub("^`|`$", "", act$lab) failed <- FALSE withCallingHandlers( vctrs::vec_assert(act$val, ptype = ptype, size = size, arg = act$lab), vctrs_error_scalar_type = function(e) { failed <<- TRUE fail(e$message) }, vctrs_error_assert = function(e) { failed <<- TRUE fail(e$message) } ) if (!failed) { pass() } invisible(act$val) } testthat/R/reporter-summary.R0000644000176200001440000001050015047715224015776 0ustar liggesusers#' Report a summary of failures #' #' @description #' This is designed for interactive usage: it lets you know which tests have #' run successfully and as well as fully reporting information about #' failures and errors. #' #' You can use the `max_reports` field to control the maximum number #' of detailed reports produced by this reporter. #' #' As an additional benefit, this reporter will praise you from time-to-time #' if all your tests pass. #' #' @export #' @family reporters SummaryReporter <- R6::R6Class( "SummaryReporter", inherit = Reporter, public = list( failures = NULL, skips = NULL, warnings = NULL, max_reports = NULL, show_praise = TRUE, omit_dots = FALSE, initialize = function( show_praise = TRUE, omit_dots = getOption("testthat.summary.omit_dots"), max_reports = getOption("testthat.summary.max_reports", 10L), ... ) { super$initialize(...) 
self$capabilities$parallel_support <- TRUE self$failures <- Stack$new() self$skips <- Stack$new() self$warnings <- Stack$new() self$max_reports <- max_reports self$show_praise <- show_praise self$omit_dots <- omit_dots }, is_full = function() { self$failures$size() >= self$max_reports }, start_file = function(file) { context_start_file(file) }, start_context = function(context) { self$cat_tight(context, ": ") }, end_context = function(context) { self$cat_line() }, add_result = function(context, test, result) { if (expectation_broken(result)) { self$failures$push(result) } else if (expectation_skip(result)) { self$skips$push(result) } else if (expectation_warning(result)) { self$warnings$push(result) } else { if (isTRUE(self$omit_dots)) { return() } } self$cat_tight(private$get_summary(result)) }, end_reporter = function() { skips <- self$skips$as_list() failures <- self$failures$as_list() warnings <- self$warnings$as_list() self$cat_line() private$cat_reports("Skipped", skips, Inf, skip_summary) private$cat_reports("Warnings", warnings, Inf, skip_summary) private$cat_reports("Failed", failures, self$max_reports, failure_summary) if (self$failures$size() >= self$max_reports) { self$cat_line( "Maximum number of ", self$max_reports, " failures reached, ", "some test results may be missing." 
) self$cat_line() } self$rule("DONE", line = 2) if (self$show_praise) { if (length(failures) == 0 && stats::runif(1) < 0.1) { self$cat_line(colourise(praise(), "success")) } if (length(failures) > 0 && stats::runif(1) < 0.25) { self$cat_line(colourise(encourage(), "error")) } } } ), private = list( get_summary = function(result) { if (expectation_broken(result)) { if (self$failures$size() <= length(labels)) { return(colourise(labels[self$failures$size()], "error")) } } single_letter_summary(result) }, cat_reports = function( header, expectations, max_n, summary_fun, collapse = "\n\n" ) { n <- length(expectations) if (n == 0L) { return() } self$rule(header, line = 2) if (n > max_n) { expectations <- expectations[seq_len(max_n)] } labels <- seq_along(expectations) exp_summary <- function(i) { summary_fun(expectations[[i]], labels[i]) } report_summary <- map_chr(seq_along(expectations), exp_summary) self$cat_tight(paste(report_summary, collapse = collapse)) if (n > max_n) { self$cat_line() self$cat_line(" ... and ", n - max_n, " more") } self$cat_line() self$cat_line() } ) ) labels <- c(1:9, letters, LETTERS) skip_summary <- function(x, label) { header <- paste0(label, ". ", x$test) paste0( colourise(header, "skip"), expectation_location(x, " (", ")"), " - ", x$message ) } failure_summary <- function(x, label, width = cli::console_width()) { header <- paste0(label, ". ", issue_header(x)) paste0( cli::rule(header, col = testthat_style("error")), "\n", format(x) ) } testthat/R/snapshot-file-snaps.R0000644000176200001440000000521015054053615016336 0ustar liggesusers# Manage a test files worth of snapshots - if the test file uses variants, this # will correspond to multiple output files. 
FileSnaps <- R6::R6Class( "FileSnaps", public = list( snap_path = NULL, file = NULL, type = NULL, snaps = NULL, initialize = function(snap_path, file, type = c("old", "cur", "new")) { self$snap_path <- snap_path self$file <- file self$type <- arg_match(type) if (self$type == "old") { # Find variants variants <- c("_default", dirs(self$snap_path)) paths <- set_names(self$path(variants), variants) paths <- paths[file.exists(paths)] self$snaps <- lapply(paths, read_snaps) } else { self$snaps <- list(`_default` = list()) } }, get = function(test, variant, i) { test_snaps <- self$snaps[[variant]][[test]] if (i > length(test_snaps)) { NULL } else { test_snaps[[i]] } }, set = function(test, variant, i, data) { self$snaps[[variant]][[test]][[i]] <- data }, append = function(test, variant, data) { self$snaps[[variant]][[test]] <- c(self$snaps[[variant]][[test]], data) length(self$snaps[[variant]][[test]]) }, reset = function(test, old) { for (variant in names(self$snaps)) { cur_test <- self$snaps[[variant]][[test]] old_test <- old$snaps[[variant]][[test]] if (length(cur_test) == 0) { self$snaps[[variant]][[test]] <- old_test } else if (length(old_test) > length(cur_test)) { self$snaps[[variant]][[test]] <- c( cur_test, old_test[-seq_along(cur_test)] ) } } invisible() }, write = function(variants = names(self$snaps)) { for (variant in variants) { default <- variant == "_default" if (!default) { dir.create(file.path(self$snap_path, variant), showWarnings = FALSE) } write_snaps( self$snaps[[variant]], self$path(variant), delete = default ) } invisible() }, delete = function(variant = "_default") { unlink(self$path(variant)) invisible() }, variants = function() { names(self$snaps) }, filename = function() { paste0(self$file, if (self$type == "new") ".new", ".md") }, path = function(variant = "_default") { ifelse( variant == "_default", file.path(self$snap_path, self$filename()), file.path(self$snap_path, variant, self$filename()) ) } ) ) dirs <- function(path) { list.dirs(path, 
recursive = FALSE, full.names = FALSE) } testthat/R/skip.R0000644000176200001440000002151515104635341013412 0ustar liggesusers#' Skip a test for various reasons #' #' @description #' `skip_if()` and `skip_if_not()` allow you to skip tests, immediately #' concluding a [test_that()] block without executing any further expectations. #' This allows you to skip a test without failure, if for some reason it #' can't be run (e.g. it depends on the feature of a specific operating system, #' or it requires a specific version of a package). #' #' See `vignette("skipping")` for more details. #' #' @section Helpers: #' #' * `skip_if_not_installed("pkg")` skips tests if package "pkg" is not #' installed or cannot be loaded (using `requireNamespace()`). Generally, #' you can assume that suggested packages are installed, and you do not #' need to check for them specifically, unless they are particularly #' difficult to install. #' #' * `skip_if_offline()` skips if an internet connection is not available #' (using [curl::nslookup()]) or if the test is run on CRAN. Requires #' \{curl\} to be installed and included in the dependencies of your package. #' #' * `skip_if_translated("msg")` skips tests if the "msg" is translated. #' #' * `skip_on_bioc()` skips on Bioconductor (using the `IS_BIOC_BUILD_MACHINE` #' env var). #' #' * `skip_on_cran()` skips on CRAN (using the `NOT_CRAN` env var set by #' devtools and friends). `local_on_cran()` gives you the ability to #' easily simulate what will happen on CRAN. #' #' * `skip_on_covr()` skips when covr is running (using the `R_COVR` env var). #' #' * `skip_on_ci()` skips on continuous integration systems like GitHub Actions, #' travis, and appveyor (using the `CI` env var). #' #' * `skip_on_os()` skips on the specified operating system(s) ("windows", #' "mac", "linux", or "solaris"). #' #' @param message A message describing why the test was skipped. 
#' @param host A string with a hostname to lookup #' @export #' @examples #' if (FALSE) skip("Some Important Requirement is not available") #' #' test_that("skip example", { #' expect_equal(1, 1L) # this expectation runs #' skip('skip') #' expect_equal(1, 2) # this one skipped #' expect_equal(1, 3) # this one is also skipped #' }) skip <- function(message = "Skipping") { message <- paste0(message, collapse = "\n") cond <- structure( list(message = paste0("Reason: ", message)), class = c("skip", "condition") ) stop(cond) } # Called automatically if the test contains no expectations skip_empty <- function() { cond <- structure( list(message = "Reason: empty test"), class = c("skip_empty", "skip", "condition") ) stop(cond) } #' @export #' @rdname skip #' @param condition Boolean condition to check. `skip_if_not()` will skip if #' `FALSE`, `skip_if()` will skip if `TRUE`. skip_if_not <- function(condition, message = NULL) { if (is.null(message)) { message <- paste0(deparse1(substitute(condition)), " is not TRUE") } if (!isTRUE(condition)) { skip(message) } else { invisible() } } #' @export #' @rdname skip skip_if <- function(condition, message = NULL) { if (is.null(message)) { message <- paste0(deparse1(substitute(condition)), " is TRUE") } if (isTRUE(condition)) { skip(message) } else { invisible() } } #' @export #' @param pkg Name of package to check for #' @param minimum_version Minimum required version for the package #' @rdname skip skip_if_not_installed <- function(pkg, minimum_version = NULL) { # most common case: it's not installed tryCatch( find.package(pkg), error = function(e) skip(paste0("{", pkg, "} is not installed")) ) # rarer: it's installed, but fails to load if (!requireNamespace(pkg, quietly = TRUE)) { skip(paste0("{", pkg, "} cannot be loaded")) } if (!is.null(minimum_version)) { installed_version <- package_version(pkg) if (installed_version < minimum_version) { skip(paste0( "Installed ", pkg, " is version ", installed_version, "; ", "but ", 
minimum_version, " is required" )) } } invisible() } package_version <- function(x) { utils::packageVersion(x) } #' @export #' @param spec A version specification like '>= 4.1.0' denoting that this test #' should only be run on R versions 4.1.0 and later. #' @rdname skip skip_unless_r <- function(spec) { check_string(spec) parts <- unlist(strsplit(spec, " ", fixed = TRUE)) if (length(parts) != 2L) { cli::cli_abort( "{.arg spec} must be an valid version specification, like {.str >= 4.0.0}, not {.str {spec}}." ) } comparator <- match.fun(parts[1L]) required_version <- numeric_version(parts[2L]) current_version <- getRversion() skip_if_not( comparator(current_version, required_version), sprintf( "Current R version (%s) does not satisfy requirement (%s %s)", current_version, parts[1L], required_version ) ) } # for mocking getRversion <- NULL #' @export #' @rdname skip skip_if_offline <- function(host = "captive.apple.com") { skip_on_cran() check_installed("curl") skip_if_not(has_internet(host), "offline") } has_internet <- function(host) { !is.null(curl::nslookup(host, error = FALSE)) } #' @export #' @rdname skip skip_on_cran <- function() { skip_if(on_cran(), "On CRAN") } #' @export #' @rdname skip #' @param on_cran Pretend we're on CRAN (`TRUE`) or not (`FALSE`). #' @param frame Calling frame to tie change to; expect use only. local_on_cran <- function(on_cran = TRUE, frame = caller_env()) { check_bool(on_cran) withr::local_envvar(NOT_CRAN = tolower(!on_cran), .local_envir = frame) } # Assert that we're not on CRAN, but don't override the user's setting local_assume_not_on_cran <- function(frame = caller_env()) { if (Sys.getenv("NOT_CRAN") != "") { return() } withr::local_envvar("NOT_CRAN" = "true", .local_envir = frame) } #' @export #' @param os Character vector of one or more operating systems to skip on. #' Supported values are `"windows"`, `"mac"`, `"linux"`, `"solaris"`, #' and `"emscripten"`. 
#' @param arch Character vector of one or more architectures to skip on. #' Common values include `"i386"` (32 bit), `"x86_64"` (64 bit), and #' `"aarch64"` (M1 mac). Supplying `arch` makes the test stricter; i.e. both #' `os` and `arch` must match in order for the test to be skipped. #' @rdname skip skip_on_os <- function(os, arch = NULL) { os <- match.arg( os, choices = c("windows", "mac", "linux", "solaris", "emscripten"), several.ok = TRUE ) msg <- switch( system_os(), windows = if ("windows" %in% os) "On Windows", darwin = if ("mac" %in% os) "On Mac", linux = if ("linux" %in% os) "On Linux", sunos = if ("solaris" %in% os) "On Solaris", emscripten = if ("emscripten" %in% os) "On Emscripten" ) if (!is.null(arch) && !is.null(msg)) { if (!is.character(arch)) { cli::cli_abort("{.arg arch} must be a character vector.") } if (system_arch() %in% arch) { msg <- paste(msg, system_arch()) } else { msg <- NULL } } if (is.null(msg)) { invisible(TRUE) } else { skip(msg) } } system_os <- function() tolower(Sys.info()[["sysname"]]) system_arch <- function() R.version$arch #' @export #' @rdname skip skip_on_ci <- function() { skip_if(on_ci(), "On CI") } #' @export #' @rdname skip skip_on_covr <- function() { skip_if(in_covr(), "On covr") } #' @export #' @rdname skip skip_on_bioc <- function() { skip_if(on_bioc(), "On Bioconductor") } #' @export #' @param msgid R message identifier used to check for translation: the default #' uses a message included in most translation packs. See the complete list in #' [`R-base.pot`](https://github.com/wch/r-source/blob/master/src/library/base/po/R-base.pot). 
#' @rdname skip skip_if_translated <- function(msgid = "'%s' not found") { skip_if( gettext(msgid) != msgid, paste0("\"", msgid, "\" is translated") ) } gettext <- function(msgid, domain = "R") { base::gettext(msgid, domain = domain) } #' Superseded skip functions #' #' @description #' `r lifecycle::badge("superseded")` #' #' * `skip_on_travis()` and `skip_on_appveyor()` have been superseded by #' [skip_on_ci()]. #' #' @export #' @keywords internal skip_on_travis <- function() { skip_if(env_var_is_true("TRAVIS"), "On Travis") } #' @export #' @rdname skip_on_travis skip_on_appveyor <- function() { skip_if(env_var_is_true("APPVEYOR"), "On Appveyor") } # helpers ----------------------------------------------------------------- on_ci <- function() { env_var_is_true("CI") } in_covr <- function() { env_var_is_true("R_COVR") } on_bioc <- function() { env_var_is_true("IS_BIOC_BUILD_MACHINE") } on_cran <- function() { env <- Sys.getenv("NOT_CRAN") if (identical(env, "")) { !interactive() } else { !isTRUE(as.logical(env)) } } env_var_is_true <- function(x) { isTRUE(as.logical(Sys.getenv(x, "false"))) } env_var_is_false <- function(x) { isFALSE(as.logical(Sys.getenv(x, "true"))) } testthat/R/test-that.R0000644000176200001440000001726115127460763014375 0ustar liggesusers#' Run a test #' #' @description #' A test encapsulates a series of expectations about a small, self-contained #' unit of functionality. Each test contains one or more expectations, such as #' [expect_equal()] or [expect_error()], and lives in a `test/testhat/test*` #' file, often together with other tests that relate to the same function or set #' of functions. #' #' Each test has its own execution environment, so an object created in a test #' also dies with the test. Note that this cleanup does not happen automatically #' for other aspects of global state, such as session options or filesystem #' changes. Avoid changing global state, when possible, and reverse any changes #' that you do make. 
#' #' @param desc Test name. Names should be brief, but evocative. It's common to #' write the description so that it reads like a natural sentence, e.g. #' `test_that("multiplication works", { ... })`. #' @param code Test code containing expectations. Braces (`{}`) should always #' be used in order to get accurate location data for test failures. #' @return When run interactively, returns `invisible(TRUE)` if all tests #' pass, otherwise throws an error. #' @export #' @examples #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1 / sqrt(2)) #' expect_equal(cos(pi / 4), 1 / sqrt(2)) #' expect_equal(tan(pi / 4), 1) #' }) #' #' \dontrun{ #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1) #' }) #' } test_that <- function(desc, code) { local_description_push(desc) code <- substitute(code) test_code(code, parent.frame()) } # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields test_code <- function(code, env, reporter = NULL, skip_on_empty = TRUE) { # Must initialise interactive reporter before local_test_context() reporter <- get_reporter() %||% local_interactive_reporter() local_test_context() frame <- caller_env() otel_n_success <- 0L otel_n_failure <- 0L otel_n_error <- 0L otel_n_skip <- 0L otel_n_warning <- 0L test <- test_description() if (!is.null(test)) { span <- otel_local_test_span(test, scope = frame) reporter$start_test(context = reporter$.context, test = test) withr::defer({ otel_update_span( span, otel_n_success, otel_n_failure, otel_n_error, otel_n_skip, otel_n_warning ) reporter$end_test(context = reporter$.context, test = test) }) } if (the$top_level_test) { # Not strictly necessary but nice to reset the count the$test_expectations <- 0 the$top_level_test <- FALSE withr::defer(the$top_level_test <- TRUE) } # Used to skip if the test _and_ its subtests have no expectations starting_expectations <- 
the$test_expectations ok <- TRUE snapshot_skipped <- FALSE # @param debug_end How many frames should be skipped to find the # last relevant frame call. Only useful for the DebugReporter. register_expectation <- function(e, debug_end) { srcref <- e[["srcref"]] %||% find_expectation_srcref(frame) e <- as.expectation(e, srcref = srcref) # Data for the DebugReporter if (debug_end >= 0) { start <- eval_bare(quote(base::sys.nframe()), test_env) + 1L e$start_frame <- start e$end_frame <- sys.nframe() - debug_end - 1L } e$test <- test %||% "(code run outside of `test_that()`)" # record keeping for otel switch( expectation_type(e), success = otel_n_success <<- otel_n_success + 1L, failure = otel_n_failure <<- otel_n_failure + 1L, error = otel_n_error <<- otel_n_error + 1L, skip = otel_n_skip <<- otel_n_skip + 1L, warning = otel_n_warning <<- otel_n_warning + 1L, NULL ) ok <<- ok && expectation_ok(e) reporter$add_result(context = reporter$.context, test = test, result = e) } expressions_opt <- getOption("expressions") expressions_opt_new <- min(expressions_opt + 500L, 500000L) handle_error <- function(e) { the$test_expectations <- the$test_expectations + 1L # Increase option(expressions) to handle errors here if possible, even in # case of a stack overflow. This is important for the DebugReporter. local_options(expressions = expressions_opt_new) # Add structured backtrace to the expectation if (can_entrace(e)) { e <- cnd_entrace(e) } register_expectation(e, 2) invokeRestart("end_test") } handle_fatal <- function(e) { the$test_expectations <- the$test_expectations + 1L register_expectation(e, 0) } handle_expectation <- function(e) { the$test_expectations <- the$test_expectations + 1L register_expectation(e, 11) invokeRestart("muffle_expectation") } handle_warning <- function(e) { # When options(warn) < 0, warnings are expected to be ignored. if (getOption("warn") < 0) { return() } # When options(warn) >= 2, warnings are converted to errors. 
# So, do not handle it here so that it will be handled by handle_error. if (getOption("warn") >= 2) { return() } if (!inherits(e, "testthat_warn")) { e <- cnd_entrace(e) } register_expectation(e, 5) tryInvokeRestart("muffleWarning") } handle_message <- function(e) { if (edition_get() < 3) { tryInvokeRestart("muffleMessage") } } handle_skip <- function(e) { the$test_expectations <- the$test_expectations + 1L debug_end <- if (inherits(e, "skip_empty")) -1 else 2 register_expectation(e, debug_end) invokeRestart("end_test") } handle_interrupt <- function(e) { if (!is.null(test)) { cat("\n") cli::cli_inform(c("!" = "Interrupting test: {test}")) } } test_env <- new.env(parent = env) old <- options(rlang_trace_top_env = test_env)[[1]] withr::defer(options(rlang_trace_top_env = old)) withr::local_options(testthat_topenv = test_env) before <- inspect_state() withRestarts( tryCatch( withCallingHandlers( { eval(code, test_env) new_expectations <- the$test_expectations > starting_expectations if (snapshot_skipped) { skip("On CRAN") } else if (!new_expectations && skip_on_empty) { skip_empty() } }, expectation = handle_expectation, packageNotFoundError = function(e) { if (on_cran()) { skip(paste0("{", e$package, "} is not installed.")) } }, snapshot_on_cran = function(cnd) { snapshot_skipped <<- TRUE invokeRestart("muffle_cran_snapshot") }, skip = handle_skip, warning = handle_warning, message = handle_message, error = handle_error, interrupt = handle_interrupt ), # some errors may need handling here, e.g., stack overflow error = handle_fatal ), end_test = function() {} ) after <- inspect_state() if (!is.null(test)) { cnd <- testthat_state_condition(before, after, call = sys.call(-1)) if (!is.null(cnd)) { register_expectation(cnd, 0) } } invisible(ok) } # Maintain a stack of descriptions local_description_push <- function(description, frame = caller_env()) { check_string(description, call = frame) local_description_set(c(the$description, description), frame = frame) } 
local_description_set <- function( description = character(), frame = caller_env() ) { check_character(description, call = frame) old <- the$description the$description <- description withr::defer(the$description <- old, frame) invisible(old) } test_description <- function(desc = the$description) { if (length(desc) == 0) { NULL } else { paste(desc, collapse = " / ") } } testthat/R/expect-all.R0000644000176200001440000000361315072252215014500 0ustar liggesusers#' Do you expect every value in a vector to have this value? #' #' These expectations are similar to `expect_true(all(x == "x"))`, #' `expect_true(all(x))` and `expect_true(all(!x))` but give more informative #' failure messages if the expectations are not met. #' #' @inheritParams expect_equal #' @export #' @examples #' x1 <- c(1, 1, 1, 1, 1, 1) #' expect_all_equal(x1, 1) #' #' x2 <- c(1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2) #' show_failure(expect_all_equal(x2, 1)) #' #' # expect_all_true() and expect_all_false() are helpers for common cases #' set.seed(1016) #' show_failure(expect_all_true(rpois(100, 10) < 20)) #' show_failure(expect_all_false(rpois(100, 10) > 20)) expect_all_equal <- function(object, expected) { act <- quasi_label(enquo(object)) exp <- quasi_label(enquo(expected)) expect_all_equal_(act, exp) invisible(act$val) } #' @export #' @rdname expect_all_equal expect_all_true <- function(object) { act <- quasi_label(enquo(object)) exp <- labelled_value(TRUE, "TRUE") expect_all_equal_(act, exp) invisible(act$val) } #' @export #' @rdname expect_all_equal expect_all_false <- function(object) { act <- quasi_label(enquo(object)) exp <- labelled_value(FALSE, "FALSE") expect_all_equal_(act, exp) invisible(act$val) } expect_all_equal_ <- function(act, exp, trace_env = caller_env()) { check_vector(act$val, error_call = trace_env, error_arg = "object") if (length(act$val) == 0) { cli::cli_abort("{.arg object} must not be empty.", call = trace_env) } check_vector(exp$val, error_call = trace_env, error_arg = 
"expected") if (length(exp$val) != 1) { cli::cli_abort("{.arg expected} must be length 1.", call = trace_env) } exp$val <- rep(exp$val, length(act$val)) names(exp$val) <- names(act$val) expect_waldo_equal_( "Expected every element of %s to equal %s.", act, exp, tolerance = testthat_tolerance(), trace_env = trace_env ) } testthat/R/snapshot-file.R0000644000176200001440000002327515111027202015212 0ustar liggesusers#' Do you expect this code to create the same file as last time? #' #' @description #' Whole file snapshot testing is designed for testing objects that don't have #' a convenient textual representation, with initial support for images #' (`.png`, `.jpg`, `.svg`), data frames (`.csv`), and text files #' (`.R`, `.txt`, `.json`, ...). #' #' The first time `expect_snapshot_file()` is run, it will create #' `_snaps/{test}/{name}.{ext}` containing reference output. Future runs will #' be compared to this reference: if different, the test will fail and the new #' results will be saved in `_snaps/{test}/{name}.new.{ext}`. To review #' failures, call [snapshot_review()]. #' #' We generally expect this function to be used via a wrapper that takes care #' of ensuring that output is as reproducible as possible, e.g. automatically #' skipping tests where it's known that images can't be reproduced exactly. #' #' @param path Path to file to snapshot. Optional for #' `announce_snapshot_file()` if `name` is supplied. #' @param name Snapshot name, taken from `path` by default. #' @param binary `r lifecycle::badge("deprecated")` Please use the #' `compare` argument instead. #' @param compare A function used to compare the snapshot files. It should take #' two inputs, the paths to the `old` and `new` snapshot, and return either #' `TRUE` or `FALSE`. This defaults to `compare_file_text` if `name` has #' extension `.r`, `.R`, `.Rmd`, `.md`, or `.txt`, and otherwise uses #' `compare_file_binary`. 
#' #' `compare_file_binary()` compares byte-by-byte and #' `compare_file_text()` compares lines-by-line, ignoring #' the difference between Windows and Mac/Linux line endings. #' @param variant If not-`NULL`, results will be saved in #' `_snaps/{variant}/{test}/{name}`. This allows you to create #' different snapshots for different scenarios, like different operating #' systems or different R versions. #' #' Note that there's no way to declare all possible variants up front which #' means that as soon as you start using variants, you are responsible for #' deleting snapshot variants that are no longer used. (testthat will still #' delete all variants if you delete the test.) #' @inheritParams expect_snapshot #' #' @section Announcing snapshots: #' testthat automatically detects dangling snapshots that have been #' written to the `_snaps` directory but which no longer have #' corresponding R code to generate them. These dangling files are #' automatically deleted so they don't clutter the snapshot #' directory. #' #' This can cause problems if your test is conditionally executed, either #' because of an `if` statement or a [skip()]. To avoid files being deleted in #' this case, you can call `announce_snapshot_file()` before the conditional #' code. 
#' #' ```R #' test_that("can save a file", { #' if (!can_save()) { #' announce_snapshot_file(name = "data.txt") #' skip("Can't save file") #' } #' path <- withr::local_tempfile() #' expect_snapshot_file(save_file(path, mydata()), "data.txt") #' }) #' ``` #' #' @export #' @examples #' #' # To use expect_snapshot_file() you'll typically need to start by writing #' # a helper function that creates a file from your code, returning a path #' save_png <- function(code, width = 400, height = 400) { #' path <- tempfile(fileext = ".png") #' png(path, width = width, height = height) #' on.exit(dev.off()) #' code #' #' path #' } #' path <- save_png(plot(1:5)) #' path #' #' \dontrun{ #' expect_snapshot_file(save_png(hist(mtcars$mpg)), "plot.png") #' } #' #' # You'd then also provide a helper that skips tests where you can't #' # be sure of producing exactly the same output. #' expect_snapshot_plot <- function(name, code) { #' # Announce the file before touching skips or running `code`. This way, #' # if the skips are active, testthat will not auto-delete the corresponding #' # snapshot file. 
#' name <- paste0(name, ".png") #' announce_snapshot_file(name = name) #' #' # Other packages might affect results #' skip_if_not_installed("ggplot2", "2.0.0") #' # Or maybe the output is different on some operating systems #' skip_on_os("windows") #' # You'll need to carefully think about and experiment with these skips #' #' path <- save_png(code) #' expect_snapshot_file(path, name) #' } expect_snapshot_file <- function( path, name = basename(path), binary = deprecated(), cran = FALSE, compare = NULL, transform = NULL, variant = NULL ) { lab <- quo_label(enquo(path)) check_string(path) if (!file.exists(path)) { cli::cli_abort("{.path {path}} doesn't exist.") } check_string(name) check_bool(cran) check_variant(variant) edition_require(3, "expect_snapshot_file()") announce_snapshot_file(name = name) if (!cran && on_cran()) { signal_snapshot_on_cran() return(invisible()) } snapshotter <- get_snapshotter() if (is.null(snapshotter)) { snapshot_not_available(path) return(invisible()) } is_text <- is_text_file(name) if (!is_missing(binary)) { lifecycle::deprecate_warn( "3.0.3", "expect_snapshot_file(binary = )", "expect_snapshot_file(compare = )" ) compare <- if (binary) compare_file_binary else compare_file_text } if (is.null(compare)) { compare <- if (is_text) compare_file_text else compare_file_binary } if (!is.null(transform)) { lines <- brio::read_lines(path) lines <- transform(lines) brio::write_lines(lines, path) } equal <- snapshotter$take_file_snapshot( name, path, file_equal = compare, variant = variant, ) if (inherits(equal, "expectation_failure")) { return(equal) } file <- snapshotter$file if (!equal) { if (is_text) { base <- paste0(c(snapshotter$snap_dir, variant, file), collapse = "/") old_path <- paste0(c(base, name), collapse = "/") new_path <- paste0(c(base, new_name(name)), collapse = "/") comp <- waldo_compare( x = brio::read_lines(old_path), x_arg = "old", y = brio::read_lines(new_path), y_arg = "new", quote_strings = FALSE ) comp <- 
c("Differences:", comp) } else { comp <- NULL } hint <- snapshot_hint(paste0(file, "/"), show_accept = is_text) msg <- c( sprintf("Snapshot of %s has changed.", lab), comp, hint ) snapshot_fail(msg) } else { pass() } } is_text_file <- function(path) { ext <- tools::file_ext(path) ext %in% c("r", "R", "txt", "md", "Rmd", "qmd", "json") } #' @rdname expect_snapshot_file #' @export announce_snapshot_file <- function(path, name = basename(path)) { edition_require(3, "announce_snapshot_file()") snapshotter <- get_snapshotter() if (!is.null(snapshotter)) { snapshotter$announce_file_snapshot(name) } } snapshot_file_equal <- function( snap_dir, # _snaps/ snap_test, # test file name snap_name, # snapshot file name snap_variant, # variant (optional) path, # path to new file file_equal = compare_file_binary, fail_on_new = NULL, trace_env = caller_env() ) { if (!file.exists(path)) { cli::cli_abort("{.path {path}} not found.", call = trace_env) } if (is.null(snap_variant)) { snap_test_dir <- file.path(snap_dir, snap_test) } else { snap_test_dir <- file.path(snap_dir, snap_variant, snap_test) } fail_on_new <- fail_on_new %||% on_ci() cur_path <- file.path(snap_test_dir, snap_name) new_path <- file.path(snap_test_dir, new_name(snap_name)) if (file.exists(cur_path)) { eq <- file_equal(cur_path, path) if (!eq) { file.copy(path, new_path, overwrite = TRUE) } else { # in case it exists from a previous run unlink(new_path) } eq } else { dir.create(snap_test_dir, showWarnings = FALSE, recursive = TRUE) file.copy(path, cur_path) message <- paste_c( "Adding new file snapshot: 'tests/testthat/_snaps/", c(snap_variant, if (!is.null(snap_variant)) "/"), c(snap_test, "/"), snap_name, "'" ) # We want to fail on CI since this suggests that the user has failed # to record the value locally if (fail_on_new) { snapshot_fail(message, trace_env = trace_env) } else { testthat_warn(message) } TRUE } } # Helpers ----------------------------------------------------------------- new_name <- function(x) 
{ pieces <- split_path(x) paste0( pieces$dir, ifelse(pieces$dir == "", "", "/"), pieces$name, ".new.", pieces$ext ) } split_path <- function(path) { dir <- dirname(path) dir[dir == "."] <- "" name <- basename(path) ext_loc <- regexpr(".", name, fixed = TRUE) no_ext <- ext_loc == -1L name_sans_ext <- ifelse(no_ext, name, substr(name, 1, ext_loc - 1)) ext <- ifelse(no_ext, "", substr(name, ext_loc + 1, nchar(name))) list( dir = dir, name = name_sans_ext, ext = ext ) } write_tmp_lines <- function( lines, ext = ".txt", eol = "\n", envir = caller_env() ) { path <- withr::local_tempfile(fileext = ext, .local_envir = envir) brio::write_lines(lines, path, eol = eol) path } local_snap_dir <- function(paths, .env = parent.frame()) { dir <- withr::local_tempfile(.local_envir = .env) withr::defer(unlink(paths), envir = .env) dirs <- file.path(dir, unique(dirname(paths))) for (d in dirs) { dir.create(d, showWarnings = FALSE, recursive = TRUE) } file.create(file.path(dir, paths)) dir } #' @rdname expect_snapshot_file #' @param old,new Paths to old and new snapshot files. #' @export compare_file_binary <- function(old, new) { old <- brio::read_file_raw(old) new <- brio::read_file_raw(new) identical(old, new) } #' @rdname expect_snapshot_file #' @export compare_file_text <- function(old, new) { old <- brio::read_lines(old) new <- brio::read_lines(new) identical(old, new) } testthat/R/expect-condition.R0000644000176200001440000003725115077667437015750 0ustar liggesusers#' Do you expect an error, warning, message, or other condition? #' #' @description #' `expect_error()`, `expect_warning()`, `expect_message()`, and #' `expect_condition()` check that code throws an error, warning, message, #' or condition with a message that matches `regexp`, or a class that inherits #' from `class`. See below for more details. #' #' In the 3rd edition, these functions match (at most) a single condition. 
All #' additional and non-matching (if `regexp` or `class` are used) conditions #' will bubble up outside the expectation. If these additional conditions #' are important you'll need to catch them with additional #' `expect_message()`/`expect_warning()` calls; if they're unimportant you #' can ignore with [suppressMessages()]/[suppressWarnings()]. #' #' It can be tricky to test for a combination of different conditions, #' such as a message followed by an error. [expect_snapshot()] is #' often an easier alternative for these more complex cases. #' #' @section Testing `message` vs `class`: #' When checking that code generates an error, it's important to check that the #' error is the one you expect. There are two ways to do this. The first #' way is the simplest: you just provide a `regexp` that match some fragment #' of the error message. This is easy, but fragile, because the test will #' fail if the error message changes (even if its the same error). #' #' A more robust way is to test for the class of the error, if it has one. #' You can learn more about custom conditions at #' , but in #' short, errors are S3 classes and you can generate a custom class and check #' for it using `class` instead of `regexp`. #' #' If you are using `expect_error()` to check that an error message is #' formatted in such a way that it makes sense to a human, we recommend #' using [expect_snapshot()] instead. #' #' @export #' @family expectations #' @inheritParams expect_that #' @param regexp Regular expression to test against. #' * A character vector giving a regular expression that must match the #' error message. #' * If `NULL`, the default, asserts that there should be an error, #' but doesn't test for a specific value. #' * If `NA`, asserts that there should be no errors, but we now recommend #' using [expect_no_error()] and friends instead. #' #' Note that you should only use `message` with errors/warnings/messages #' that you generate. 
Avoid tests that rely on the specific text generated by #' another package since this can easily change. If you do need to test text #' generated by another package, either protect the test with `skip_on_cran()` #' or use `expect_snapshot()`. #' @inheritDotParams expect_match -object -regexp -info -label -all #' @param class Instead of supplying a regular expression, you can also supply #' a class name. This is useful for "classed" conditions. #' @param inherit Whether to match `regexp` and `class` across the #' ancestry of chained errors. #' @param all *DEPRECATED* If you need to test multiple warnings/messages #' you now need to use multiple calls to `expect_message()`/ #' `expect_warning()` #' @seealso [expect_no_error()], `expect_no_warning()`, #' `expect_no_message()`, and `expect_no_condition()` to assert #' that code runs without errors/warnings/messages/conditions. #' @return If `regexp = NA`, the value of the first argument; otherwise #' the captured condition. #' @examples #' # Errors ------------------------------------------------------------------ #' f <- function() stop("My error!") #' expect_error(f()) #' expect_error(f(), "My error!") #' #' # You can use the arguments of grepl to control the matching #' expect_error(f(), "my error!", ignore.case = TRUE) #' #' # Note that `expect_error()` returns the error object so you can test #' # its components if needed #' err <- expect_error(rlang::abort("a", n = 10)) #' expect_equal(err$n, 10) #' #' # Warnings ------------------------------------------------------------------ #' f <- function(x) { #' if (x < 0) { #' warning("*x* is already negative") #' return(x) #' } #' -x #' } #' expect_warning(f(-1)) #' expect_warning(f(-1), "already negative") #' expect_warning(f(1), NA) #' #' # To test message and output, store results to a variable #' expect_warning(out <- f(-1), "already negative") #' expect_equal(out, -1) #' #' # Messages ------------------------------------------------------------------ #' f <- 
function(x) { #' if (x < 0) { #' message("*x* is already negative") #' return(x) #' } #' #' -x #' } #' expect_message(f(-1)) #' expect_message(f(-1), "already negative") #' expect_message(f(1), NA) expect_error <- function( object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL ) { check_string(regexp, allow_null = TRUE, allow_na = TRUE) check_string(class, allow_null = TRUE) check_bool(inherit) if (edition_get() >= 3) { expect_condition_matching_( "error", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label ) } else { act <- quasi_capture(enquo(object), label, capture_error, entrace = TRUE) msg <- compare_condition_2e( act$cap, act$lab, regexp = regexp, class = class, ..., inherit = inherit ) # Access error fields with `[[` rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields if (!is.null(msg)) { fail(msg, info = info, trace = act$cap[["trace"]]) } else { pass() } invisible(act$val %||% act$cap) } } #' @export #' @rdname expect_error expect_warning <- function( object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = FALSE, info = NULL, label = NULL ) { check_string(regexp, allow_null = TRUE, allow_na = TRUE) check_string(class, allow_null = TRUE) check_bool(inherit) check_bool(all) if (edition_get() >= 3) { if (!missing(all)) { cli::cli_warn("The {.arg all} argument is deprecated.") } expect_condition_matching_( "warning", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label ) } else { act <- quasi_capture( enquo(object), label, capture_warnings, ignore_deprecation = identical(regexp, NA) ) msg <- compare_messages( act$cap, act$lab, regexp = regexp, all = all, ..., cond_type = "warnings" ) if (!is.null(msg)) { fail(msg, info = info) } else { pass() } invisible(act$val) } } #' @export #' @rdname expect_error expect_message <- function( object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = 
FALSE, info = NULL, label = NULL ) { check_string(regexp, allow_null = TRUE, allow_na = TRUE) check_string(class, allow_null = TRUE) check_bool(inherit) check_bool(all) if (edition_get() >= 3) { expect_condition_matching_( "message", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label ) } else { act <- quasi_capture(enquo(object), label, capture_messages) msg <- compare_messages(act$cap, act$lab, regexp = regexp, all = all, ...) if (!is.null(msg)) { fail(msg, info = info) } else { pass() } invisible(act$val) } } #' @export #' @rdname expect_error expect_condition <- function( object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL ) { check_string(regexp, allow_null = TRUE, allow_na = TRUE) check_string(class, allow_null = TRUE) check_bool(inherit) if (edition_get() >= 3) { expect_condition_matching_( "condition", {{ object }}, regexp = regexp, class = class, ..., inherit = inherit, info = info, label = label ) } else { act <- quasi_capture( enquo(object), label, capture_condition, entrace = TRUE ) msg <- compare_condition_2e( act$cap, act$lab, regexp = regexp, class = class, ..., inherit = inherit, cond_type = "condition" ) if (!is.null(msg)) { fail(msg, info = info, trace = act$cap[["trace"]]) } else { pass() } invisible(act$val %||% act$cap) } } expect_condition_matching_ <- function( base_class, object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL, trace_env = caller_env(), error_call = caller_env() ) { check_condition_dots(regexp, ..., error_call = error_call) matcher <- cnd_matcher( base_class, class, regexp, ..., inherit = inherit, ignore_deprecation = base_class == "warning" && identical(regexp, NA), error_call = error_call ) act <- quasi_capture( enquo(object), label, capture_matching_condition, matches = matcher ) expected <- !identical(regexp, NA) msg <- compare_condition_3e(base_class, class, act$cap, act$lab, expected) # Access error fields with `[[` 
rather than `$` because the # `$.Throwable` from the rJava package throws with unknown fields if (!is.null(msg)) { fail( msg, info = info, trace = act$cap[["trace"]], trace_env = trace_env ) } else { pass() } # If a condition was expected, return it. Otherwise return the value # of the expression. invisible(if (expected) act$cap else act$val) } # ------------------------------------------------------------------------- cnd_matcher <- function( base_class, class = NULL, regexp = NULL, ..., inherit = TRUE, ignore_deprecation = FALSE, error_call = caller_env() ) { check_string(class, allow_null = TRUE, call = error_call) check_string(regexp, allow_null = TRUE, allow_na = TRUE, call = error_call) function(cnd) { if (!inherit) { cnd$parent <- NULL } if (ignore_deprecation && is_deprecation(cnd)) { return(FALSE) } matcher <- function(x) { if (!inherits(x, base_class)) { return(FALSE) } if (!is.null(class) && !inherits(x, class)) { return(FALSE) } if (!is.null(regexp) && !identical(regexp, NA)) { withCallingHandlers( grepl(regexp, conditionMessage(x), ...), error = function(e) { cli::cli_abort( "Failed to compare {base_class} to {.arg regexp}.", parent = e, call = error_call ) } ) } else { TRUE } } cnd_some(cnd, matcher) } } has_classes <- function(x, classes) { all(classes %in% class(x)) } is_deprecation <- function(x) { inherits(x, "lifecycle_warning_deprecated") } cnd_some <- function(.cnd, .p, ...) 
{ .p <- as_function(.p) while (is_condition(.cnd)) { if (.p(.cnd, ...)) { return(TRUE) } .cnd <- .cnd$parent } FALSE } capture_matching_condition <- function(expr, matches) { matched <- NULL tl <- current_env() withCallingHandlers(expr, condition = function(cnd) { if (!is.null(matched) || !matches(cnd)) { return() } if (can_entrace(cnd)) { cnd <- cnd_entrace(cnd) } matched <<- cnd if (inherits(cnd, "message") || inherits(cnd, "warning")) { cnd_muffle(cnd) } else if (inherits(cnd, "error") || inherits(cnd, "skip")) { return_from(tl, cnd) } }) matched } # Helpers ----------------------------------------------------------------- compare_condition_3e <- function(cond_type, cond_class, cond, lab, expected) { if (expected) { if (is.null(cond)) { if (is.null(cond_class)) { sprintf("Expected %s to throw a %s.", lab, cond_type) } else { sprintf( "Expected %s to throw a %s with class <%s>.", lab, cond_type, cond_class ) } } else { NULL } } else { if (!is.null(cond)) { c( sprintf("Expected %s not to throw any %ss.", lab, cond_type), actual_condition(cond) ) } else { NULL } } } compare_condition_2e <- function( cond, lab, regexp = NULL, class = NULL, ..., inherit = TRUE, cond_type = "error" ) { # Expecting no condition if (identical(regexp, NA)) { if (!is.null(cond)) { return(sprintf( "%s threw an %s.\nMessage: %s\nClass: %s", lab, cond_type, cnd_message(cond), paste(class(cond), collapse = "/") )) } else { return() } } # Otherwise we're definitely expecting a condition if (is.null(cond)) { return(sprintf("Expected %s to throw a %s.", lab, cond_type)) } matches <- cnd_matches_2e(cond, class, regexp, inherit, ...) 
ok_class <- matches[["class"]] ok_msg <- matches[["msg"]] # All good if (ok_msg && ok_class) { return() } problems <- c(if (!ok_class) "class", if (!ok_msg) "message") message <- cnd_message(cond) details <- c( if (!ok_class) { sprintf( "Expected class: %s\nActual class: %s\nMessage: %s", paste0(class, collapse = "/"), paste0(class(cond), collapse = "/"), message ) }, if (!ok_msg) { sprintf( "Expected match: %s\nActual message: %s", encodeString(regexp, quote = '"'), encodeString(message, quote = '"') ) } ) sprintf( "%s threw an %s with unexpected %s.\n%s", lab, cond_type, paste(problems, collapse = " and "), paste(details, collapse = "\n") ) } cnd_matches_2e <- function(cnd, class, regexp, inherit, ...) { if (!inherit) { cnd$parent <- NULL } ok_class <- is.null(class) || cnd_inherits(cnd, class) ok_msg <- is.null(regexp) || cnd_some(cnd, function(x) { any(grepl(regexp, cnd_message(x), ...)) }) c(class = ok_class, msg = ok_msg) } compare_messages <- function( messages, lab, regexp = NA, ..., all = FALSE, cond_type = "messages" ) { bullets <- paste0("* ", messages, collapse = "\n") # Expecting no messages if (identical(regexp, NA)) { if (length(messages) > 0) { return(sprintf( "Expected %s not to generate %s.\nActually generated:\n%s", lab, cond_type, bullets )) } else { return() } } # Otherwise we're definitely expecting messages if (length(messages) == 0) { return(sprintf("Expected %s to produce %s.", lab, cond_type)) } if (is.null(regexp)) { return() } matched <- grepl(regexp, messages, ...) 
# all/any ok if ((all && all(matched)) || (!all && any(matched))) { return() } sprintf( "%s produced unexpected %s.\n%s\n%s", lab, cond_type, paste0("Expected match: ", encodeString(regexp)), paste0("Actual values:\n", bullets) ) } # Disable rlang backtrace reminders so they don't interfere with # expected error messages cnd_message <- function(x) { withr::local_options(rlang_backtrace_on_error = "none") conditionMessage(x) } check_condition_dots <- function( regexp = NULL, ..., error_call = caller_env() ) { if (!is.null(regexp) || missing(...)) { return() } dot_names <- ...names() if (is.null(dot_names)) { dot_names <- rep("", ...length()) } unnamed <- dot_names == "" dot_names[unnamed] <- paste0("..", seq_along(dot_names)[unnamed]) cli::cli_abort( c( "Can't supply {.arg ...} unless {.arg regexp} is set.", "*" = "Unused arguments: {.arg {dot_names}}.", i = "Did you mean to use {.arg regexp} so {.arg ...} is passed to {.fn grepl}?" ), call = error_call ) } actual_condition <- function(cond) { paste0( "Actually got a <", class(cond)[[1]], "> with message:\n", indent_lines(cnd_message(cond)) ) } testthat/R/reporter-slow.R0000644000176200001440000000532115053661631015271 0ustar liggesusers#' Find slow tests #' #' @description #' `SlowReporter` is designed to identify slow tests. It reports the #' execution time for each test and can optionally filter out tests that #' run faster than a specified threshold (default: 1 second). This reporter #' is useful for performance optimization and identifying tests that may #' benefit from optimization or parallelization. #' #' @export #' @family reporters SlowReporter <- R6::R6Class( "SlowReporter", inherit = Reporter, public = list( min_time = 0.5, test_timings = NULL, current_test_start = NULL, current_file = NULL, initialize = function(min_time = 0.5, ...) { super$initialize(...) 
self$min_time <- min_time
      self$test_timings <- list()
    },

    # Emitted once before any tests run: prints the report header,
    # including the slowness threshold in effect.
    start_reporter = function(context) {
      self$cat_line(
        cli::style_bold("Slow tests"),
        " (showing tests >= ",
        self$min_time,
        "s)"
      )
      self$cat_line()
    },

    # Track which file is executing so timings can be attributed to it.
    start_file = function(file) {
      self$current_file <- file
    },

    # Record the elapsed-time clock at the start of each test.
    start_test = function(context, test) {
      self$current_test_start <- proc.time()[[3]]
    },

    # Compute the test's elapsed time, store it, and report it immediately
    # if it meets the slowness threshold.
    end_test = function(context, test) {
      if (is.null(self$current_test_start)) {
        return()
      }
      time_taken <- proc.time()[[3]] - self$current_test_start

      # Store timing information
      timing <- list(
        file = self$current_file,
        test = test,
        time = time_taken
      )
      self$test_timings[[length(self$test_timings) + 1]] <- timing

      if (time_taken >= self$min_time) {
        self$show_timing(timing)
      }
      self$current_test_start <- NULL
    },

    # Print overall and slow-test totals; when there are more than 10 slow
    # tests, also list the 10 slowest ones.
    end_reporter = function() {
      if (length(self$test_timings) == 0) {
        return()
      }

      all_times <- map_dbl(self$test_timings, \(x) x$time)
      is_slow <- all_times >= self$min_time

      self$cat_line()
      self$rule(cli::style_bold("Summary"), line = 2)
      self$cat_line("All tests: ", sprintf("%.2fs", sum(all_times)))
      self$cat_line("Slow tests: ", sprintf("%.2fs", sum(all_times[is_slow])))

      # Only show the detailed list when there are many slow tests
      if (sum(is_slow) <= 10) {
        return()
      }

      # Sort by time descending for summary
      slowest <- self$test_timings[order(all_times, decreasing = TRUE)]

      self$cat_line()
      self$rule(cli::style_bold("Slowest tests:"), line = 1)

      # Show top 10 slowest tests
      for (i in 1:10) {
        self$show_timing(slowest[[i]])
      }
      if (length(slowest) > 10) {
        # NOTE(review): this counts *all* remaining tests, not just the
        # remaining slow ones -- confirm whether sum(is_slow) - 10 is meant.
        self$cat_line("... and ", length(slowest) - 10, " more slow tests")
      }
      self$cat_line()
    },

    # Print a single "[<time>] <file>: <test>" line.
    show_timing = function(timing) {
      time <- sprintf("%.2fs", timing$time)
      # Bug fix: `time` was previously interpolated twice in a row,
      # producing output like "[0.75s] 0.75s file: test".
      self$cat_line("[", time, "] ", timing$file, ": ", timing$test)
    }
  )
)
testthat/R/reporter-progress.R0000644000176200001440000003770615127535363016163 0ustar liggesusers#' Report progress interactively
#'
#' @description
#' `ProgressReporter` is designed for interactive use. Its goal is to
#' give you actionable insights to help you understand the status of your
#' code.
This reporter also praises you from time-to-time if all your tests #' pass. It's the default reporter for [test_dir()]. #' #' `ParallelProgressReporter` is very similar to `ProgressReporter`, but #' works better for packages that want parallel tests. #' #' `CompactProgressReporter` is a minimal version of `ProgressReporter` #' designed for use with single files. It's the default reporter for #' [test_file()]. #' #' @export #' @family reporters ProgressReporter <- R6::R6Class( "ProgressReporter", inherit = Reporter, public = list( show_praise = TRUE, min_time = 1, start_time = NULL, last_update = NULL, update_interval = NULL, skips = NULL, problems = NULL, max_fail = NULL, n_ok = 0, n_skip = 0, n_warn = 0, n_fail = 0, frames = NULL, dynamic = FALSE, ctxt_start_time = NULL, ctxt_issues = NULL, ctxt_n = 0, ctxt_n_ok = 0, ctxt_n_skip = 0, ctxt_n_warn = 0, ctxt_n_fail = 0, ctxt_name = "", file_name = "", initialize = function( show_praise = TRUE, max_failures = testthat_max_fails(), min_time = 1, update_interval = 0.1, ... ) { super$initialize(...) 
self$capabilities$parallel_support <- TRUE self$show_praise <- show_praise self$max_fail <- max_failures self$min_time <- min_time self$update_interval <- update_interval self$skips <- Stack$new() self$problems <- Stack$new() self$ctxt_issues <- Stack$new() # Capture at init so not affected by test settings self$frames <- cli::get_spinner()$frames self$dynamic <- cli::is_dynamic_tty() }, is_full = function() { self$n_fail >= self$max_fail }, start_reporter = function(context) { self$start_time <- proc.time() self$show_header() }, start_file = function(file) { self$file_name <- file self$ctxt_issues <- Stack$new() self$ctxt_start_time <- proc.time() context_start_file(self$file_name) }, start_context = function(context) { self$ctxt_name <- context self$ctxt_issues <- Stack$new() self$ctxt_n <- 0L self$ctxt_n_ok <- 0L self$ctxt_n_fail <- 0L self$ctxt_n_warn <- 0L self$ctxt_n_skip <- 0L self$ctxt_start_time <- proc.time() self$show_status() }, show_header = function() { self$cat_line( colourise(cli::symbol$tick, "success"), " | ", colourise("F", "failure"), " ", colourise("W", "warning"), " ", colourise(" S", "skip"), " ", colourise(" OK", "success"), " | ", "Context" ) }, status_data = function() { list( n = self$ctxt_n, n_ok = self$ctxt_n_ok, n_fail = self$ctxt_n_fail, n_warn = self$ctxt_n_warn, n_skip = self$ctxt_n_skip, name = self$ctxt_name ) }, show_status = function(complete = FALSE, time = 0, pad = FALSE) { data <- self$status_data() if (complete) { if (data$n_fail > 0) { status <- cli::col_red(cli::symbol$cross) } else { status <- cli::col_green(cli::symbol$tick) } } else { # Do not print if not enough time has passed since we last printed. 
if (!self$should_update()) { return() } status <- spinner(self$frames, data$n) if (data$n_fail > 0) { status <- colourise(status, "failure") } else if (data$n_warn > 0) { status <- colourise(status, "warning") } } col_format <- function(n, type) { if (n == 0) { if (type == "skip") { " " } else { " " } } else { if (type == "skip") { colourise(sprintf("%2d", n), type) } else { colourise(n, type) } } } message <- paste0( status, " | ", col_format(data$n_fail, "fail"), " ", col_format(data$n_warn, "warn"), " ", col_format(data$n_skip, "skip"), " ", sprintf("%3d", data$n_ok), " | ", data$name ) if (complete && time > self$min_time) { message <- paste0( message, cli::col_grey(sprintf(" [%.1fs]", time)) ) } if (pad) { message <- strpad(message, self$width) message <- cli::ansi_substr(message, 1, self$width) } if (!complete) { message <- strpad(message, self$width) self$cat_tight(self$cr(), message) } else { self$cat_line(self$cr(), message) } }, cr = function() { if (self$dynamic) { "\r" } else { "\n" } }, end_context = function(context) { time <- proc.time() - self$ctxt_start_time self$last_update <- NULL # context with no expectation = automatic file context in file # that also has manual contexts if (self$ctxt_n == 0) { return() } self$show_status(complete = TRUE, time = time[[3]]) self$report_issues(self$ctxt_issues) if (self$is_full()) { self$report_full() } }, report_full = function() { snapshotter <- get_snapshotter() if (!is.null(snapshotter)) { snapshotter$end_file() } # Separate from the progress bar self$cat_line() stop_reporter(c( "Maximum number of failures exceeded; quitting.", i = "Increase this number with (e.g.) 
{.run testthat::set_max_fails(Inf)}" )) }, add_result = function(context, test, result) { self$ctxt_n <- self$ctxt_n + 1L if (expectation_broken(result)) { self$n_fail <- self$n_fail + 1 self$ctxt_n_fail <- self$ctxt_n_fail + 1 self$ctxt_issues$push(result) self$problems$push(result) } else if (expectation_skip(result)) { self$n_skip <- self$n_skip + 1 self$ctxt_n_skip <- self$ctxt_n_skip + 1 self$skips$push(result) } else if (expectation_warning(result)) { self$n_warn <- self$n_warn + 1 self$ctxt_n_warn <- self$ctxt_n_warn + 1 self$ctxt_issues$push(result) } else { self$n_ok <- self$n_ok + 1 self$ctxt_n_ok <- self$ctxt_n_ok + 1 } self$show_status() }, end_reporter = function() { if (self$is_full()) { return() } self$cat_line() colour_if <- function(n, type) { colourise(n, if (n == 0) "success" else type) } self$rule(cli::style_bold("Results"), line = 2) time <- proc.time() - self$start_time if (time[[3]] > self$min_time) { self$cat_line("Duration: ", sprintf("%.1f s", time[[3]]), col = "cyan") self$cat_line() } skip_report(self) if (self$problems$size() > 0) { problems <- self$problems$as_list() self$rule("Failed tests", line = 1) for (problem in problems) { self$cat_line(issue_summary(problem)) self$cat_line() } } status <- summary_line(self$n_fail, self$n_warn, self$n_skip, self$n_ok) self$cat_line(status) if (self$is_full()) { self$rule("Terminated early", line = 2) } if (!self$show_praise || stats::runif(1) > 0.1) { return() } self$cat_line() if (self$n_fail == 0) { self$cat_line(colourise(praise(), "success")) } else { self$cat_line(colourise(encourage(), "error")) } }, report_issues = function(issues) { if (issues$size() > 0) { self$rule() issues <- issues$as_list() summary <- map_chr(issues, issue_summary) self$cat_tight(paste(summary, collapse = "\n\n")) self$cat_line() self$rule() } }, should_update = function() { if (self$update_interval == 0) { return(TRUE) } if (identical(self$update_interval, Inf)) { return(FALSE) } time <- proc.time()[[3]] if ( 
!is.null(self$last_update) && (time - self$last_update) < self$update_interval ) { return(FALSE) } self$last_update <- time TRUE } ) ) testthat_max_fails <- function() { val <- getOption("testthat.progress.max_fails") if (is.null(val)) { env <- Sys.getenv("TESTTHAT_MAX_FAILS") val <- if (!identical(env, "")) as.numeric(env) else 10 } val } #' @export #' @rdname ProgressReporter CompactProgressReporter <- R6::R6Class( "CompactProgressReporter", inherit = ProgressReporter, public = list( initialize = function(min_time = Inf, ...) { super$initialize(min_time = min_time, ...) }, start_file = function(name) { if (!self$rstudio) { self$cat_line() self$rule(cli::style_bold(paste0("Testing ", name)), line = 2) } super$start_file(name) }, start_reporter = function(context) {}, end_context = function(context) { if (self$ctxt_issues$size() == 0) { return() } self$cat_line() self$cat_line() issues <- self$ctxt_issues$as_list() summary <- map_chr(issues, issue_summary, rule = TRUE) self$cat_tight(paste(summary, collapse = "\n\n")) self$cat_line() }, end_reporter = function() { had_feedback <- self$n_fail > 0 || self$n_warn > 0 if (self$n_skip > 0) { if (!had_feedback) { self$cat_line() } self$cat_line() skip_report(self) } if (had_feedback) { self$show_status() self$cat_line() } else if (self$is_full()) { self$cat_line(" Terminated early") } else if (self$n_skip == 0 && !self$rstudio) { self$cat_line(cli::style_bold(" Done!")) } }, show_status = function(complete = NULL) { self$local_user_output() status <- summary_line(self$n_fail, self$n_warn, self$n_skip, self$n_ok) self$cat_tight(self$cr(), status) } ) ) # parallel progres reporter ----------------------------------------------- #' @export #' @rdname ProgressReporter ParallelProgressReporter <- R6::R6Class( "ParallelProgressReporter", inherit = ProgressReporter, public = list( files = list(), spin_frame = 0L, is_rstudio = FALSE, initialize = function(...) { super$initialize(...) 
self$capabilities$parallel_support <- TRUE self$capabilities$parallel_updates <- TRUE self$update_interval <- 0.05 self$is_rstudio <- Sys.getenv("RSTUDIO", "") == "1" }, start_file = function(file) { if (!file %in% names(self$files)) { self$files[[file]] <- list( issues = Stack$new(), n_fail = 0L, n_skip = 0L, n_warn = 0L, n_ok = 0L, name = context_name(file), start_time = proc.time() ) } self$file_name <- file }, start_context = function(context) { # we'll just silently ignore this }, end_context = function(context) { # we'll just silently ignore this }, end_file = function() { if (!self$is_full()) { fsts <- self$files[[self$file_name]] time <- proc.time() - fsts$start_time # Workaround for https://github.com/rstudio/rstudio/issues/7649 if (self$is_rstudio) { self$cat_tight(strpad(self$cr(), self$width + 1)) # +1 for \r } self$show_status(complete = TRUE, time = time[[3]], pad = TRUE) self$report_issues(fsts$issues) self$files[[self$file_name]] <- NULL if (length(self$files)) { self$update(force = TRUE) } } else { self$update(force = TRUE) self$cat_line() self$rule() issues <- unlist( map(self$files, \(x) x$issues$as_list()), recursive = FALSE ) summary <- map_chr(issues, issue_summary) self$cat_tight(paste(summary, collapse = "\n\n")) self$cat_line() self$report_full() } }, end_reporter = function() { self$cat_tight(self$cr(), strpad("", self$width)) super$end_reporter() }, show_header = function() { super$show_header() self$update(force = TRUE) }, status_data = function() { self$files[[self$file_name]] }, add_result = function(context, test, result) { self$ctxt_n <- self$ctxt_n + 1L file <- self$file_name if (expectation_broken(result)) { self$n_fail <- self$n_fail + 1 self$files[[file]]$n_fail <- self$files[[file]]$n_fail + 1L self$files[[file]]$issues$push(result) self$problems$push(result) } else if (expectation_skip(result)) { self$n_skip <- self$n_skip + 1 self$files[[file]]$n_skip <- self$files[[file]]$n_skip + 1L self$skips$push(result) } else if 
(expectation_warning(result)) { self$n_warn <- self$n_warn + 1 self$files[[file]]$n_warn <- self$files[[file]]$n_warn + 1L self$files[[file]]$issues$push(result) } else { self$n_ok <- self$n_ok + 1 self$files[[file]]$n_ok <- self$files[[file]]$n_ok + 1 } }, update = function(force = FALSE) { if (!force && !self$should_update()) { return() } self$spin_frame <- self$spin_frame + 1L status <- spinner(self$frames, self$spin_frame) message <- paste( status, summary_line(self$n_fail, self$n_warn, self$n_skip, self$n_ok), if (length(self$files) > 0) "@" else "Starting up...", paste(context_name(names(self$files)), collapse = ", ") ) message <- strpad(message, self$width) message <- cli::ansi_substr(message, 1, self$width) self$cat_tight(self$cr(), message) } ) ) # helpers ----------------------------------------------------------------- spinner <- function(frames, i) { frames[((i - 1) %% length(frames)) + 1] } issue_header <- function(x, pad = FALSE, location = TRUE) { type <- expectation_type(x) if (has_colour()) { type <- colourise(first_upper(type), type) } else { type <- first_upper(type) } if (pad) { type <- strpad(type, 7) } paste0(type, if (location) expectation_location(x, " (", ")"), ": ", x$test) } issue_summary <- function(x, rule = FALSE, location = TRUE) { header <- cli::style_bold(issue_header(x, location = location)) if (rule) { # Don't truncate long test names width <- max(cli::ansi_nchar(header) + 6, getOption("width")) header <- cli::rule(header, width = width) } paste0(header, "\n", format(x)) } strpad <- function(x, width = cli::console_width()) { n <- pmax(0, width - cli::ansi_nchar(x)) paste0(x, strrep(" ", n)) } skip_report <- function(reporter, line = 1) { n <- reporter$skips$size() if (n == 0) { return() } reporter$rule(paste0("Skipped tests (", n, ")"), line = line) reporter$cat_line(skip_bullets(reporter$skips$as_list())) reporter$cat_line() } skip_bullets <- function(skips) { message <- map_chr(skips, "[[", "message") message <- gsub("Reason: 
", "", message) message <- gsub(":?\n(\n|.)+", "", message) # only show first line locs <- map_chr(skips, expectation_location) locs_by_skip <- split(locs, message) n <- lengths(locs_by_skip) skip_summary <- map_chr(locs_by_skip, paste, collapse = ", ") bullets <- paste0( cli::symbol$bullet, " ", names(locs_by_skip), " (", n, "): ", skip_summary ) cli::ansi_strwrap(bullets, exdent = 2) } #' Set maximum number of test failures allowed before aborting the run #' #' This sets the `TESTTHAT_MAX_FAILS` env var which will affect both the #' current R process and any processes launched from it. #' #' @param n Maximum number of failures allowed. #' @export #' @keywords internal set_max_fails <- function(n) { Sys.setenv('TESTTHAT_MAX_FAILS' = n) } testthat/R/auto-test.R0000644000176200001440000001076115054053615014374 0ustar liggesusers#' Watches code and tests for changes, rerunning tests as appropriate. #' #' @description #' `r lifecycle::badge("superseded")` #' #' The idea behind `auto_test()` is that you just leave it running while #' you develop your code. Every time you save a file it will be automatically #' tested and you can easily see if your changes have caused any test #' failures. #' #' The current strategy for rerunning tests is as follows: #' #' - if any code has changed, then those files are reloaded and all tests #' rerun #' - otherwise, each new or modified test is run #' @seealso [auto_test_package()] #' @export #' @param code_path path to directory containing code #' @param test_path path to directory containing tests #' @param reporter test reporter to use #' @param env environment in which to execute test suite. #' @param hash Passed on to [watch()]. When FALSE, uses less accurate #' modification time stamps, but those are faster for large files. 
#' @keywords internal
auto_test <- function(
  code_path,
  test_path,
  reporter = default_reporter(),
  env = test_env(),
  hash = TRUE
) {
  reporter <- find_reporter(reporter)
  # Normalize up front so prefix matching against watcher-reported paths
  # (which are also normalized below) is reliable
  code_path <- normalizePath(code_path)
  test_path <- normalizePath(test_path)

  # Start by loading all code and running all tests
  source_dir(code_path, env = env)
  test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE))

  # Next set up watcher to monitor changes.
  # Called with added/deleted/modified paths on every filesystem event;
  # returning TRUE keeps the watch loop alive.
  watcher <- function(added, deleted, modified) {
    changed <- normalizePath(c(added, modified))

    # Split changes into test files vs source files by path prefix
    tests <- changed[starts_with(changed, test_path)]
    code <- changed[starts_with(changed, code_path)]

    if (length(code) > 0) {
      # Reload code and rerun all tests
      cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n")
      cat("Rerunning all tests\n")
      source_dir(code_path, env = env)
      test_dir(test_path, env = env, reporter = reporter$clone(deep = TRUE))
    } else if (length(tests) > 0) {
      # If test changes, rerun just that test
      cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n")
      test_files(tests, env = env, reporter = reporter$clone(deep = TRUE))
    }

    TRUE
  }
  watch(c(code_path, test_path), watcher, hash = hash)
}
#' @param pkg path to package
#' @export
#' @param reporter test reporter to use
#' @param hash Passed on to [watch()]. When FALSE, uses less accurate
#'   modification time stamps, but those are faster for large files.
#' @rdname auto_test auto_test_package <- function( pkg = ".", reporter = default_reporter(), hash = TRUE ) { reporter <- find_reporter(reporter) path <- pkgload::pkg_path(pkg) package <- pkgload::pkg_name(path) code_path <- file.path(path, c("R", "src")) code_path <- code_path[file.exists(code_path)] code_path <- normalizePath(code_path) test_path <- normalizePath(file.path(path, "tests", "testthat")) # Start by loading all code and running all tests local_assume_not_on_cran() pkgload::load_all(path) test_dir( test_path, package = package, reporter = reporter$clone(deep = TRUE), stop_on_failure = FALSE ) # Next set up watcher to monitor changes watcher <- function(added, deleted, modified) { changed <- normalizePath(c(added, modified)) tests <- changed[starts_with(changed, test_path)] code <- changed[starts_with(changed, code_path)] # Remove helper from test and add it to code (if a helper changed, # like for code, reload all and rerun all tests) helper <- tests[starts_with(basename(tests), "helper-")] tests <- setdiff(tests, helper) code <- c(code, helper) if (length(code) > 0) { # Reload code and rerun all tests cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n") cat("Rerunning all tests\n") pkgload::load_all(path, quiet = TRUE) test_dir( test_path, package = package, reporter = reporter$clone(deep = TRUE) ) } else if (length(tests) > 0) { # If test changes, rerun just that test cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n") env <- env_clone(asNamespace(package)) test_files( test_dir = test_path, test_package = package, test_paths = tests, env = env, reporter = reporter$clone(deep = TRUE) ) } TRUE } watch(c(code_path, test_path), watcher, hash = hash) } # Helpers ----------------------------------------------------------------- starts_with <- function(string, prefix) { substr(string, 1, nchar(prefix)) == prefix } testthat/R/snapshot-github.R0000644000176200001440000001031115127554030015553 0ustar liggesusers#' Download 
snapshots from GitHub #' #' @description #' If your snapshots fail on GitHub, it can be a pain to figure out exactly #' why, or to incorporate them into your local package. This function makes it #' easy, only requiring you to interactively select which job you want to #' take the artifacts from. #' #' Note that you should not generally need to use this function manually; #' instead copy and paste from the hint emitted on GitHub. This hint is only #' emitted when running in a job named "R-CMD-check", since that's where the #' testthat artifact is typically uploaded. #' #' @param repository Repository owner/name, e.g. `"r-lib/testthat"`. #' @param run_id Run ID, e.g. `"47905180716"`. You can find this in the action url. #' @param dest_dir Package root directory. Defaults to the current directory. #' @export snapshot_download_gh <- function(repository, run_id, dest_dir = ".") { check_string(repository) check_string(run_id) check_string(dest_dir) check_installed("gh") dest_snaps <- file.path(dest_dir, "tests", "testthat", "_snaps") if (!dir.exists(dest_snaps)) { cli::cli_abort("No snapshot directory found in {.file {dest_dir}}.") } job_id <- gh_find_job(repository, run_id) artifact_id <- gh_find_artifact(repository, job_id) path <- withr::local_tempfile(pattern = "gh-snaps-") gh_download_artifact(repository, artifact_id, path) files <- dir(path, full.names = TRUE) if (length(files) != 1) { cli::cli_abort("Unexpected artifact format.") } inner_dir <- files[[1]] src_snaps <- file.path(inner_dir, "tests", "testthat", "_snaps") dir_copy(src_snaps, dest_snaps) } gh_find_job <- function(repository, run_id) { jobs_json <- gh::gh( "/repos/{repository}/actions/runs/{run_id}/jobs", repository = repository, run_id = run_id ) jobs <- data.frame( id = map_dbl(jobs_json$jobs, \(x) x$id), name = map_chr(jobs_json$jobs, \(x) x$name) ) jobs <- jobs[order(jobs$name), ] idx <- utils::menu(jobs$name, title = "Which job?") if (idx == 0) { cli::cli_abort("Selection cancelled.") } 
# (tail of gh_find_job(): return the id of the job the user selected)
jobs$id[[idx]]
}

# Find the artifact id for a job by scraping its logs for the
# "Artifact download URL" line that the upload step prints.
#
# @param repository Repository owner/name, e.g. "r-lib/testthat".
# @param job_id Job id within the workflow run.
# @return The artifact id (the final path component of the download URL).
gh_find_artifact <- function(repository, job_id) {
  job_logs <- gh::gh(
    "GET /repos/{repository}/actions/jobs/{job_id}/logs",
    repository = repository,
    job_id = job_id,
    .send_headers = c("Accept" = "application/vnd.github.v3+json")
  )

  log_lines <- strsplit(job_logs$message, "\r?\n")[[1]]
  # Bug fix: the capture group must be *named* `artifact_url` (PCRE
  # `(?<name>...)` syntax); with an unnamed group, `matches$artifact_url`
  # below would not exist and the lookup could never succeed.
  matches <- re_match(log_lines, "Artifact download URL: (?<artifact_url>.*)")
  matches <- matches[!is.na(matches$artifact_url), ]
  if (nrow(matches) == 0) {
    cli::cli_abort("Failed to find artifact.")
  }

  # Take last artifact URL; if the job has failed the previous artifact will
  # be the R CMD check logs
  artifact_url <- matches$artifact_url[nrow(matches)]
  basename(artifact_url)
}

# Download an artifact as a zip and unpack it into `path`.
#
# @return `path`, invisibly.
gh_download_artifact <- function(repository, artifact_id, path) {
  zip_path <- withr::local_tempfile(pattern = "gh-zip-")

  gh::gh(
    "/repos/{repository}/actions/artifacts/{artifact_id}/{archive_format}",
    repository = repository,
    artifact_id = artifact_id,
    archive_format = "zip",
    .destfile = zip_path
  )

  utils::unzip(zip_path, exdir = path)
  invisible(path)
}

# Directory helpers ------------------------------------------------------------

# Recursively create each path, silently skipping ones that already exist.
dir_create <- function(paths) {
  for (path in paths) {
    dir.create(path, recursive = TRUE, showWarnings = FALSE)
  }
  invisible(paths)
}

# Copy the contents of `src_dir` into `dst_dir`, skipping files whose
# contents are already identical, and report what was copied.
dir_copy <- function(src_dir, dst_dir) {
  # First create directories
  dirs <- list.dirs(src_dir, recursive = TRUE, full.names = FALSE)
  dir_create(file.path(dst_dir, dirs))

  # Then copy files
  files <- dir(src_dir, recursive = TRUE)
  src_files <- file.path(src_dir, files)
  dst_files <- file.path(dst_dir, files)

  # A file is "same" only if it exists on both sides with identical content
  same <- map_lgl(seq_along(files), \(i) {
    same_file(src_files[[i]], dst_files[[i]])
  })

  n_new <- sum(!same)
  if (n_new == 0) {
    cli::cli_inform(c(i = "No new snapshots."))
  } else {
    cli::cli_inform(c(
      v = "Copying {n_new} new snapshots: {.file {files[!same]}}."
)) } file.copy(src_files[!same], dst_files[!same], overwrite = TRUE) invisible() } same_file <- function(x, y) { file.exists(x) && file.exists(y) && hash_file(x) == hash_file(y) } on_gh <- function() { Sys.getenv("GITHUB_ACTIONS") == "true" } testthat/R/evaluate-promise.R0000644000176200001440000000263715047715224015737 0ustar liggesusers#' Evaluate a promise, capturing all types of output. #' #' @param code Code to evaluate. #' @keywords internal #' @export #' @return A list containing #' \item{result}{The result of the function} #' \item{output}{A string containing all the output from the function} #' \item{warnings}{A character vector containing the text from each warning} #' \item{messages}{A character vector containing the text from each message} #' @examples #' evaluate_promise({ #' print("1") #' message("2") #' warning("3") #' 4 #' }) evaluate_promise <- function(code, print = FALSE) { warnings <- Stack$new() handle_warning <- function(condition) { if (!is_deprecation(condition)) { warnings$push(condition) tryInvokeRestart("muffleWarning") } } messages <- Stack$new() handle_message <- function(condition) { messages$push(condition) tryInvokeRestart("muffleMessage") } path <- withr::local_tempfile() result <- withr::with_output_sink( path, withCallingHandlers( withVisible(code), warning = handle_warning, message = handle_message ) ) if (result$visible && print) { withr::with_output_sink(path, print(result$value), append = TRUE) } output <- paste0(brio::read_lines(path), collapse = "\n") list( result = result$value, output = output, warnings = get_messages(warnings$as_list()), messages = get_messages(messages$as_list()) ) } testthat/R/import-standalone-purrr.R0000644000176200001440000001316715047715224017265 0ustar liggesusers# Standalone file: do not edit by hand # Source: https://github.com/r-lib/rlang/blob/HEAD/R/standalone-purrr.R # Generated by: usethis::use_standalone("r-lib/rlang", "purrr") # 
---------------------------------------------------------------------- # # --- # repo: r-lib/rlang # file: standalone-purrr.R # last-updated: 2023-02-23 # license: https://unlicense.org # imports: rlang # --- # # This file provides a minimal shim to provide a purrr-like API on top of # base R functions. They are not drop-in replacements but allow a similar style # of programming. # # ## Changelog # # 2023-02-23: # * Added `list_c()` # # 2022-06-07: # * `transpose()` is now more consistent with purrr when inner names # are not congruent (#1346). # # 2021-12-15: # * `transpose()` now supports empty lists. # # 2021-05-21: # * Fixed "object `x` not found" error in `imap()` (@mgirlich) # # 2020-04-14: # * Removed `pluck*()` functions # * Removed `*_cpl()` functions # * Used `as_function()` to allow use of `~` # * Used `.` prefix for helpers # # nocov start map <- function(.x, .f, ...) { .f <- as_function(.f, env = global_env()) lapply(.x, .f, ...) } walk <- function(.x, .f, ...) { map(.x, .f, ...) invisible(.x) } map_lgl <- function(.x, .f, ...) { .rlang_purrr_map_mold(.x, .f, logical(1), ...) } map_int <- function(.x, .f, ...) { .rlang_purrr_map_mold(.x, .f, integer(1), ...) } map_dbl <- function(.x, .f, ...) { .rlang_purrr_map_mold(.x, .f, double(1), ...) } map_chr <- function(.x, .f, ...) { .rlang_purrr_map_mold(.x, .f, character(1), ...) } .rlang_purrr_map_mold <- function(.x, .f, .mold, ...) { .f <- as_function(.f, env = global_env()) out <- vapply(.x, .f, .mold, ..., USE.NAMES = FALSE) names(out) <- names(.x) out } map2 <- function(.x, .y, .f, ...) { .f <- as_function(.f, env = global_env()) out <- mapply(.f, .x, .y, MoreArgs = list(...), SIMPLIFY = FALSE) if (length(out) == length(.x)) { set_names(out, names(.x)) } else { set_names(out, NULL) } } map2_lgl <- function(.x, .y, .f, ...) { as.vector(map2(.x, .y, .f, ...), "logical") } map2_int <- function(.x, .y, .f, ...) { as.vector(map2(.x, .y, .f, ...), "integer") } map2_dbl <- function(.x, .y, .f, ...) 
{ as.vector(map2(.x, .y, .f, ...), "double") } map2_chr <- function(.x, .y, .f, ...) { as.vector(map2(.x, .y, .f, ...), "character") } imap <- function(.x, .f, ...) { map2(.x, names(.x) %||% seq_along(.x), .f, ...) } pmap <- function(.l, .f, ...) { .f <- as.function(.f) args <- .rlang_purrr_args_recycle(.l) do.call( "mapply", c( FUN = list(quote(.f)), args, MoreArgs = quote(list(...)), SIMPLIFY = FALSE, USE.NAMES = FALSE ) ) } .rlang_purrr_args_recycle <- function(args) { lengths <- map_int(args, length) n <- max(lengths) stopifnot(all(lengths == 1L | lengths == n)) to_recycle <- lengths == 1L args[to_recycle] <- map(args[to_recycle], function(x) rep.int(x, n)) args } keep <- function(.x, .f, ...) { .x[.rlang_purrr_probe(.x, .f, ...)] } discard <- function(.x, .p, ...) { sel <- .rlang_purrr_probe(.x, .p, ...) .x[is.na(sel) | !sel] } map_if <- function(.x, .p, .f, ...) { matches <- .rlang_purrr_probe(.x, .p) .x[matches] <- map(.x[matches], .f, ...) .x } .rlang_purrr_probe <- function(.x, .p, ...) { if (is_logical(.p)) { stopifnot(length(.p) == length(.x)) .p } else { .p <- as_function(.p, env = global_env()) map_lgl(.x, .p, ...) } } compact <- function(.x) { .x[as.logical(lengths(.x))] } transpose <- function(.l) { if (!length(.l)) { return(.l) } inner_names <- names(.l[[1]]) if (is.null(inner_names)) { fields <- seq_along(.l[[1]]) } else { fields <- set_names(inner_names) .l <- map(.l, function(x) { if (is.null(names(x))) { set_names(x, inner_names) } else { x } }) } # This way missing fields are subsetted as `NULL` instead of causing # an error .l <- map(.l, as.list) map(fields, function(i) { map(.l, .subset2, i) }) } every <- function(.x, .p, ...) { .p <- as_function(.p, env = global_env()) for (i in seq_along(.x)) { if (!rlang::is_true(.p(.x[[i]], ...))) return(FALSE) } TRUE } some <- function(.x, .p, ...) 
{ .p <- as_function(.p, env = global_env()) for (i in seq_along(.x)) { if (rlang::is_true(.p(.x[[i]], ...))) return(TRUE) } FALSE } negate <- function(.p) { .p <- as_function(.p, env = global_env()) function(...) !.p(...) } reduce <- function(.x, .f, ..., .init) { f <- function(x, y) .f(x, y, ...) Reduce(f, .x, init = .init) } reduce_right <- function(.x, .f, ..., .init) { f <- function(x, y) .f(y, x, ...) Reduce(f, .x, init = .init, right = TRUE) } accumulate <- function(.x, .f, ..., .init) { f <- function(x, y) .f(x, y, ...) Reduce(f, .x, init = .init, accumulate = TRUE) } accumulate_right <- function(.x, .f, ..., .init) { f <- function(x, y) .f(y, x, ...) Reduce(f, .x, init = .init, right = TRUE, accumulate = TRUE) } detect <- function(.x, .f, ..., .right = FALSE, .p = is_true) { .p <- as_function(.p, env = global_env()) .f <- as_function(.f, env = global_env()) for (i in .rlang_purrr_index(.x, .right)) { if (.p(.f(.x[[i]], ...))) { return(.x[[i]]) } } NULL } detect_index <- function(.x, .f, ..., .right = FALSE, .p = is_true) { .p <- as_function(.p, env = global_env()) .f <- as_function(.f, env = global_env()) for (i in .rlang_purrr_index(.x, .right)) { if (.p(.f(.x[[i]], ...))) { return(i) } } 0L } .rlang_purrr_index <- function(x, right = FALSE) { idx <- seq_along(x) if (right) { idx <- rev(idx) } idx } list_c <- function(x) { inject(c(!!!x)) } # nocov end testthat/R/quasi-label.R0000644000176200001440000000730015072252215014636 0ustar liggesusers#' Quasi-labelling #' #' The first argument to every `expect_` function can use unquoting to #' construct better labels. This makes it easy to create informative labels when #' expectations are used inside a function or a for loop. `quasi_label()` wraps #' up the details, returning the expression and label. #' #' @section Limitations: #' Because all `expect_` function use unquoting to generate more informative #' labels, you can not use unquoting for other purposes. 
Instead, you'll need #' to perform all other unquoting outside of the expectation and only test #' the results. #' #' @param quo A quosure created by `rlang::enquo()`. #' @param label An optional label to override the default. This is #' only provided for internal usage. Modern expectations should not #' include a `label` parameter. #' @param arg Argument name shown in error message if `quo` is missing. #' @keywords internal #' @return A list containing two elements: #' \item{val}{The evaluate value of `quo`} #' \item{lab}{The quasiquoted label generated from `quo`} #' @export #' @examples #' f <- function(i) if (i > 3) i * 9 else i * 10 #' i <- 10 #' #' # This sort of expression commonly occurs inside a for loop or function #' # And the failure isn't helpful because you can't see the value of i #' # that caused the problem: #' show_failure(expect_equal(f(i), i * 10)) #' #' # To overcome this issue, testthat allows you to unquote expressions using #' # !!. This causes the failure message to show the value rather than the #' # variable name #' show_failure(expect_equal(f(!!i), !!(i * 10))) quasi_label <- function(quo, label = NULL, arg = NULL) { if (is.null(arg)) { arg <- substitute(quo) if (is_call(arg, "enquo")) { arg <- arg[[2]] } arg <- as_label(arg) } force(quo) if (quo_is_missing(quo)) { cli::cli_abort( "argument {.arg {arg}} is missing, with no default.", call = caller_env() ) } expr <- quo_get_expr(quo) value <- eval_bare(expr, quo_get_env(quo)) label <- label %||% expr_label(expr) labelled_value(value, label) } labelled_value <- function(value, label) { if (missing(value)) { list(val = missing_arg(), lab = label) } else { list(val = value, lab = label) } } quasi_capture <- function(.quo, .label, .capture, ...) { act <- list() act$lab <- .label %||% quo_label(.quo) act$cap <- .capture( act$val <- eval_bare(quo_get_expr(.quo), quo_get_env(.quo)), ... 
) act } expr_label <- function(x) { if (is_syntactic_literal(x)) { deparse1(x) } else if (is.name(x)) { paste0("`", as.character(x), "`") } else if (is_call(x)) { chr <- deparse(x) if (length(chr) > 1) { if (is_call(x, "function")) { x[[3]] <- quote(...) } else if (is_call_infix(x)) { left <- deparse(x[[2]], width.cutoff = 29) right <- deparse(x[[3]], width.cutoff = 28) if (length(left) > 1) { x[[2]] <- quote(expr = ...) } if (length(right) > 1) { x[[3]] <- quote(expr = ...) } } else { x <- call2(x[[1]], quote(expr = ...)) } } paste0("`", deparse1(x), "`") } else { # Any other object that's been inlined in x <- deparse(x) if (length(x) > 1) { x <- paste0(x[[1]], "...)") } x } } is_call_infix <- function(x) { if (!is_call(x, n = 2)) { return(FALSE) } fn <- x[[1]] if (!is_symbol(fn)) { return(FALSE) } name <- as_string(fn) base <- c( ":", "::", ":::", "$", "@", "^", "*", "/", "+", "-", ">", ">=", "<", "<=", "==", "!=", "!", "&", "&&", "|", "||", "~", "<-", "<<-" ) name %in% base || grepl("^%.*%$", name) } testthat/R/verify-output.R0000644000176200001440000001224115104634254015304 0ustar liggesusers#' Verify output #' #' @description #' `r lifecycle::badge("superseded")` #' #' This function is superseded in favour of `expect_snapshot()` and friends. #' #' This is a regression test that records interwoven code and output into a #' file, in a similar way to knitting an `.Rmd` file (but see caveats below). #' #' `verify_output()` is designed particularly for testing print methods and error #' messages, where the primary goal is to ensure that the output is helpful to #' a human. Obviously, you can't test that with code, so the best you can do is #' make the results explicit by saving them to a text file. This makes the output #' easy to verify in code reviews, and ensures that you don't change the output #' by accident. #' #' `verify_output()` is designed to be used with git: to see what has changed #' from the previous run, you'll need to use `git diff` or similar. 
#' #' @section Syntax: #' `verify_output()` can only capture the abstract syntax tree, losing all #' whitespace and comments. To mildly offset this limitation: #' #' - Strings are converted to R comments in the output. #' - Strings starting with `# ` are converted to headers in the output. #' #' @section CRAN: #' On CRAN, `verify_output()` will never fail, even if the output changes. #' This avoids false positives because tests of print methods and error #' messages are often fragile due to implicit dependencies on other packages, #' and failure does not imply incorrect computation, just a change in #' presentation. #' #' @param path Path to record results. #' #' This should usually be a call to [test_path()] in order to ensure that #' the same path is used when run interactively (when the working directory #' is typically the project root), and when run as an automated test (when #' the working directory will be `tests/testthat`). #' @param code Code to execute. This will usually be a multiline expression #' contained within `{}` (similarly to `test_that()` calls). #' @param width Width of console output #' @param crayon Enable cli/crayon package colouring? #' @param unicode Enable cli package UTF-8 symbols? If you set this to #' `TRUE`, call `skip_if(!cli::is_utf8_output())` to disable the #' test on your CI platforms that don't support UTF-8 (e.g. Windows). #' @param env The environment to evaluate `code` in. 
#' @export #' @keywords internal verify_output <- function( path, code, width = 80, crayon = FALSE, unicode = FALSE, env = caller_env() ) { local_reproducible_output(width = width, crayon = crayon, unicode = unicode) expr <- substitute(code) output <- verify_exec(expr, env = env) if (!interactive() && on_cran()) { skip("On CRAN") } expect_file_unchanged_(path, output, update = TRUE) invisible() } verify_exec <- function(expr, env = caller_env(), replay = output_replay) { if (is_call(expr, "{")) { exprs <- as.list(expr[-1]) } else { exprs <- list(expr) } device_path <- withr::local_tempfile(pattern = "verify_exec_") withr::local_pdf(device_path) grDevices::dev.control(displaylist = "enable") exprs <- lapply(exprs, function(x) { if (is.character(x)) paste0("# ", x) else expr_deparse(x) }) source <- unlist(exprs, recursive = FALSE) handler <- evaluate::new_output_handler( value = testthat_print, calling_handlers = list(error = function(cnd) rlang::entrace(cnd)) ) results <- evaluate::evaluate( source, envir = env, new_device = FALSE, output_handler = handler ) output <- unlist(lapply(results, replay)) output <- gsub("\r", "", output, fixed = TRUE) output } output_replay <- function(x) { UseMethod("output_replay", x) } #' @export output_replay.character <- function(x) { c(split_lines(x), "") } #' @export output_replay.source <- function(x) { lines <- split_lines(x$src) # Remove header of lines so they don't get prefixed first <- lines[[1]] if (grepl("^# # ", first)) { header <- gsub("^# # ", "", first) lines <- lines[-1] } else { header <- NULL } n <- length(lines) if (n > 0) { lines[1] <- paste0("> ", lines[1]) if (n > 1) { lines[2:n] <- paste0("+ ", lines[2:n]) } } if (!is.null(header)) { underline <- strrep("=", nchar(header)) lines <- c("", header, underline, "", lines) } lines } #' @export output_replay.error <- function(x) { msg <- cnd_message(x) if (is.null(x$call)) { msg <- paste0("Error: ", msg) } else { call <- deparse(x$call) msg <- paste0("Error in ", call, 
": ", msg) } c(split_lines(msg), "") } #' @export output_replay.warning <- function(x) { msg <- cnd_message(x) if (is.null(x$call)) { msg <- paste0("Warning: ", msg) } else { call <- deparse(x$call) msg <- paste0("Warning in ", call, ": ", msg) } c(split_lines(msg), "") } #' @export output_replay.message <- function(x) { # Messages are the only conditions where a new line is appended automatically msg <- paste0("Message: ", sub("\n$", "", cnd_message(x))) c(split_lines(msg), "") } #' @export output_replay.recordedplot <- function(x) { cli::cli_abort("Plots are not supported.") } # Helpers ------------------------------------------------------------ split_lines <- function(x) { strsplit(x, "\n")[[1]] } testthat/R/compare.R0000644000176200001440000002247015047715224014100 0ustar liggesusers#' Provide human-readable comparison of two objects #' #' @description #' `r lifecycle::badge("superseded")` #' #' `compare` is similar to [base::all.equal()], but somewhat buggy in its #' use of `tolerance`. Please use [waldo](https://waldo.r-lib.org/) instead. #' #' @export #' @param x,y Objects to compare #' @param ... Additional arguments used to control specifics of comparison #' @keywords internal #' @order 1 compare <- function(x, y, ...) { UseMethod("compare", x) } comparison <- function(equal = TRUE, message = "Equal") { check_bool(equal) check_character(message) structure( list( equal = equal, message = paste(message, collapse = "\n") ), class = "comparison" ) } difference <- function(..., fmt = "%s") { comparison(FALSE, sprintf(fmt, ...)) } no_difference <- function() { comparison() } #' @export print.comparison <- function(x, ...) { if (x$equal) { cat("Equal\n") return() } cat(x$message) } #' @export #' @rdname compare #' @order 2 compare.default <- function(x, y, ..., max_diffs = 9) { same <- all.equal(x, y, ...) if (length(same) > max_diffs) { same <- c(same[1:max_diffs], "...") } comparison(identical(same, TRUE), as.character(same)) } print_out <- function(x, ...) 
{
  # Render x via its print method and collapse the captured lines into a
  # single newline-separated string.
  lines <- capture_output_lines(x, ..., print = TRUE)
  paste0(lines, collapse = "\n")
}

# Common helpers ---------------------------------------------------------------

# Pairwise predicates (`same_*`) with matching difference constructors
# (`diff_*`), used by the compare() methods below.

same_length <- function(x, y) length(x) == length(y)
diff_length <- function(x, y) {
  difference(fmt = "Lengths differ: %i is not %i", length(x), length(y))
}

same_type <- function(x, y) identical(typeof(x), typeof(y))
diff_type <- function(x, y) {
  difference(fmt = "Types not compatible: %s is not %s", typeof(x), typeof(y))
}

same_class <- function(x, y) {
  # Two non-objects (no class attribute set) are never compared by class.
  if (!is.object(x) && !is.object(y)) {
    return(TRUE)
  }
  identical(class(x), class(y))
}
diff_class <- function(x, y) {
  difference(
    fmt = "Classes differ: %s is not %s",
    format_class(class(x)),
    format_class(class(y))
  )
}

same_attr <- function(x, y) {
  # attr.all.equal() returns NULL when the attributes match.
  is.null(attr.all.equal(x, y))
}
diff_attr <- function(x, y) {
  out <- attr.all.equal(x, y)
  difference(out)
}

# Elementwise equality that treats a pair of NAs as equal (unlike `==`,
# which would propagate NA).
vector_equal <- function(x, y) {
  (is.na(x) & is.na(y)) | (!is.na(x) & !is.na(y) & x == y)
}

# As vector_equal(), but values within `tolerance` of each other also count
# as equal.
vector_equal_tol <- function(x, y, tolerance = .Machine$double.eps^0.5) {
  (is.na(x) & is.na(y)) |
    (!is.na(x) & !is.na(y)) & (x == y | abs(x - y) < tolerance)
}

# character ---------------------------------------------------------------

#' @param max_diffs Maximum number of differences to show
#' @param max_lines Maximum number of lines to show from each difference
#' @param check.attributes If `TRUE`, also checks values of attributes.
#' @param width Width of output device
#' @rdname compare
#' @export
#' @examples
#' # Character -----------------------------------------------------------------
#' x <- c("abc", "def", "jih")
#' compare(x, x)
#'
#' y <- paste0(x, "y")
#' compare(x, y)
#'
#' compare(letters, paste0(letters, "-"))
#'
#' x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus
#' tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar
#' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. 
" #' y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus #' tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar #' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " #' compare(x, y) #' compare(c(x, x), c(y, y)) #' compare.character <- function( x, y, check.attributes = TRUE, ..., max_diffs = 5, max_lines = 5, width = cli::console_width() ) { if (identical(x, y)) { return(no_difference()) } if (!same_type(x, y)) { return(diff_type(x, y)) } if (!same_class(x, y)) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } if (check.attributes && !same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal(x, y) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_character(x, y, diff) difference(format( mismatches, max_diffs = max_diffs, max_lines = max_lines, width = width )) } } mismatch_character <- function(x, y, diff = !vector_equal(x, y)) { structure( list( i = which(diff), x = x[diff], y = y[diff], n = length(diff), n_diff = sum(diff) ), class = "mismatch_character" ) } #' @export format.mismatch_character <- function( x, ..., max_diffs = 5, max_lines = 5, width = cli::console_width() ) { width <- width - 6 # allocate space for labels n_show <- seq_len(min(x$n_diff, max_diffs)) encode <- function(x) encodeString(x, quote = '"') show_x <- str_trunc(encode(x$x[n_show]), width * max_lines) show_y <- str_trunc(encode(x$y[n_show]), width * max_lines) show_i <- x$i[n_show] sidebyside <- Map( function(x, y, pos) { x <- paste0("x[", pos, "]: ", str_chunk(x, width)) y <- paste0("y[", pos, "]: ", str_chunk(y, width)) paste(c(x, y), collapse = "\n") }, show_x, show_y, show_i ) summary <- paste0(x$n_diff, "/", x$n, " mismatches") paste0(summary, "\n", paste0(sidebyside, collapse = "\n\n")) } #' @export print.mismatch_character <- function(x, ...) 
{ cat(format(x, ...), "\n", sep = "") } str_trunc <- function(x, length) { too_long <- nchar(x) > length x[too_long] <- paste0(substr(x[too_long], 1, length - 3), "...") x } str_chunk <- function(x, length) { lines <- ceiling(nchar(x) / length) start <- (seq_len(lines) - 1) * length + 1 substring(x, start, start + length - 1) } # compare.numeric --------------------------------------------------------- #' @export #' @rdname compare #' @param tolerance Numerical tolerance: any differences (in the sense of #' [base::all.equal()]) smaller than this value will be ignored. #' #' The default tolerance is `sqrt(.Machine$double.eps)`, unless long doubles #' are not available, in which case the test is skipped. #' @examples #' # Numeric ------------------------------------------------------------------- #' #' x <- y <- runif(100) #' y[sample(100, 10)] <- 5 #' compare(x, y) #' #' x <- y <- 1:10 #' x[5] <- NA #' x[6] <- 6.5 #' compare(x, y) #' #' # Compare ignores minor numeric differences in the same way #' # as all.equal. #' compare(x, x + 1e-9) compare.numeric <- function( x, y, tolerance = testthat_tolerance(), check.attributes = TRUE, ..., max_diffs = 9 ) { all_equal <- all.equal( x, y, tolerance = tolerance, check.attributes = check.attributes, ... ) if (isTRUE(all_equal)) { return(no_difference()) } if (!typeof(y) %in% c("integer", "double")) { return(diff_type(x, y)) } if (!same_class(x, y)) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } if (check.attributes && !same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal_tol(x, y, tolerance = tolerance) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_numeric(x, y, diff) difference(format(mismatches, max_diffs = max_diffs)) } } #' Default numeric tolerance #' #' testthat's default numeric tolerance is `r testthat_tolerance()`. 
#'
#' @keywords internal
#' @export
testthat_tolerance <- function() {
  # Without long-double support the default tolerance would be too tight to
  # be meaningful, so the calling test is skipped instead of failing.
  if (identical(capabilities("long.double"), FALSE)) {
    skip("Long doubles not available and `tolerance` not supplied")
  }
  .Machine$double.eps^0.5
}

# Record of numeric mismatches between x and y: the differing positions
# (`i`), the differing values, the total and mismatch counts, and the mean
# absolute difference (`mu_diff`).
mismatch_numeric <- function(x, y, diff = !vector_equal(x, y)) {
  structure(
    list(
      i = which(diff),
      x = x[diff],
      y = y[diff],
      n = length(diff),
      n_diff = sum(diff),
      mu_diff = mean(abs(x[diff] - y[diff]), na.rm = TRUE)
    ),
    class = "mismatch_numeric"
  )
}

# Format a mismatch_numeric as a "k/n mismatches" summary line (plus the
# average difference when there is more than one mismatch) followed by up to
# `max_diffs` aligned "[i] x - y == diff" rows.
#' @export
format.mismatch_numeric <- function(x, ..., max_diffs = 9, digits = 3) {
  summary <- paste0(x$n_diff, "/", x$n, " mismatches")
  if (x$n_diff > 1) {
    mu <- format(x$mu_diff, digits = digits, trim = TRUE)
    summary <- paste0(summary, " (average diff: ", mu, ")")
  }

  n_show <- seq_len(min(x$n_diff, max_diffs))
  diffs <- paste0(
    format(paste0("[", x$i[n_show], "]")),
    " ",
    format(x$x[n_show], digits = digits),
    " - ",
    format(x$y[n_show], digits = digits),
    " == ",
    format(x$x[n_show] - x$y[n_show], digits = digits)
  )

  # An ellipsis marks rows truncated by max_diffs.
  if (x$n_diff > length(n_show)) {
    diffs <- c(diffs, "...")
  }

  paste0(summary, "\n", paste(diffs, collapse = "\n"))
}

#' @export
print.mismatch_numeric <- function(x, ...)
{ cat(format(x, ...), "\n", sep = "") } # compare.time ------------------------------------------------------------ #' @rdname compare #' @export compare.POSIXt <- function(x, y, tolerance = 0.001, ..., max_diffs = 9) { if (!inherits(y, "POSIXt")) { return(diff_class(x, y)) } if (!same_length(x, y)) { return(diff_length(x, y)) } x <- standardise_tzone(as.POSIXct(x)) y <- standardise_tzone(as.POSIXct(y)) if (!same_attr(x, y)) { return(diff_attr(x, y)) } diff <- !vector_equal_tol(x, y, tolerance = tolerance) if (!any(diff)) { no_difference() } else { mismatches <- mismatch_numeric(x, y, diff) difference(format(mismatches, max_diffs = max_diffs)) } } standardise_tzone <- function(x) { if ( is.null(attr(x, "tzone")) || identical(attr(x, "tzone"), Sys.timezone()) ) { attr(x, "tzone") <- "" } x } testthat/R/edition.R0000644000176200001440000000346315047715224014106 0ustar liggesusersfind_edition <- function(path, package = NULL) { from_environment <- Sys.getenv("TESTTHAT_EDITION") if (nzchar(from_environment)) { return(as.integer(from_environment)) } desc <- find_description(path, package) if (is.null(desc)) { return(2L) } as.integer(desc$get_field("Config/testthat/edition", default = 2L)) } find_description <- function(path, package = NULL) { if (!is.null(package)) { return(desc::desc(package = package)) } else { tryCatch( pkgload::pkg_desc(path), error = function(e) NULL ) } } edition_deprecate <- function(in_edition, what, instead = NULL) { if (edition_get() < in_edition) { return() } cli::cli_warn(c( "{.code {what}} was deprecated in {edition_name(in_edition)}.", i = instead )) } edition_require <- function(in_edition, what) { if ( edition_get() >= in_edition || isTRUE(getOption("testthat.edition_ignore")) ) { return() } cli::cli_abort("{.code {what}} requires {edition_name(in_edition)}.") } edition_name <- function(x) { if (x == 2) { "the 2nd edition" } else if (x == 3) { "the 3rd edition" } else { paste("edition ", x) } } #' Temporarily change the active testthat 
edition #' #' `local_edition()` allows you to temporarily (within a single test or #' a single test file) change the active edition of testthat. #' `edition_get()` allows you to retrieve the currently active edition. #' #' @export #' @param x Edition Should be a single integer. #' @param .env Environment that controls scope of changes. For expert use only. local_edition <- function(x, .env = parent.frame()) { check_number_whole(x, min = 2, max = 3) local_bindings(edition = x, .env = the, .frame = .env) } #' @export #' @rdname local_edition edition_get <- function() { the$edition %||% find_edition(".") } testthat/R/reporter-timing.R0000644000176200001440000000500015053661631015566 0ustar liggesusers#' Test reporter: show timings for slow tests #' #' @description #' `SlowReporter` is designed to identify slow tests. It reports the #' execution time for each test, ignoring tests faster than a specified #' threshold (default: 0.5s). #' #' The easiest way to run it over your package is with #' `devtools::test(reporter = "slow")`. #' #' @export #' @family reporters SlowReporter <- R6::R6Class( "SlowReporter", inherit = Reporter, public = list( min_time = NA_real_, test_timings = NULL, current_test_start = NULL, current_file = NULL, initialize = function(min_time = 0.5, ...) { check_number_decimal(min_time, min = 0) super$initialize(...) 
self$min_time <- min_time self$test_timings <- list() }, start_file = function(file) { self$current_file <- file }, start_test = function(context, test) { self$current_test_start <- proc.time()[[3]] }, end_test = function(context, test) { if (is.null(self$current_test_start)) { return() } time_taken <- proc.time()[[3]] - self$current_test_start # Store timing information timing <- list( file = self$current_file, test = test, time = time_taken ) self$test_timings[[length(self$test_timings) + 1]] <- timing if (time_taken >= self$min_time) { self$show_timing(timing) } self$current_test_start <- NULL }, end_reporter = function() { if (length(self$test_timings) == 0) { return() } all_times <- map_dbl(self$test_timings, \(x) x$time) is_slow <- all_times >= self$min_time self$cat_line() self$rule(cli::style_bold("Summary"), line = 2) self$cat_line("All tests: ", sprintf("%.2fs", sum(all_times))) self$cat_line("Slow tests: ", sprintf("%.2fs", sum(all_times[is_slow]))) if (sum(is_slow) <= 10) { return() } # Sort by time descending for summary slowest <- self$test_timings[order(all_times, decreasing = TRUE)] self$cat_line() self$rule(cli::style_bold("Slowest tests:"), line = 1) # Show top 10 slowest tests for (i in 1:10) { self$show_timing(slowest[[i]]) } if (length(slowest) > 10) { self$cat_line("... and ", length(slowest) - 10, " more slow tests") } self$cat_line() }, show_timing = function(timing) { time <- sprintf("%.2fs", timing$time) self$cat_line("[", time, "] ", timing$file, ": ", timing$test) } ) ) testthat/R/test-env.R0000644000176200001440000000357615042723556014227 0ustar liggesusers#' Determine testing status #' #' @description #' These functions help you determine if you code is running in a particular #' testing context: #' #' * `is_testing()` is `TRUE` inside a test. #' * `is_snapshot()` is `TRUE` inside a snapshot test #' * `is_checking()` is `TRUE` inside of `R CMD check` (i.e. by [test_check()]). 
#' * `is_parallel()` is `TRUE` if the tests are run in parallel. #' * `testing_package()` gives name of the package being tested. #' #' A common use of these functions is to compute a default value for a `quiet` #' argument with `is_testing() && !is_snapshot()`. In this case, you'll #' want to avoid an run-time dependency on testthat, in which case you should #' just copy the implementation of these functions into a `utils.R` or similar. #' #' @export is_testing <- function() { identical(Sys.getenv("TESTTHAT"), "true") } #' @export #' @rdname is_testing is_parallel <- function() { identical(Sys.getenv("TESTTHAT_IS_PARALLEL"), "true") } #' @export #' @rdname is_testing is_checking <- function() { identical(Sys.getenv("TESTTHAT_IS_CHECKING"), "true") } #' @export #' @rdname is_testing is_snapshot <- function() { identical(Sys.getenv("TESTTHAT_IS_SNAPSHOT"), "true") } #' @export #' @rdname is_testing testing_package <- function() { Sys.getenv("TESTTHAT_PKG") } #' Generate default testing environment. #' #' We use a new environment which inherits from [globalenv()] or a package #' namespace. In an ideal world, we'd avoid putting the global environment on #' the search path for tests, but it's not currently possible without losing #' the ability to load packages in tests. #' #' @keywords internal #' @export test_env <- function(package = NULL) { if (is.null(package)) { env(globalenv()) } else { # Must clone environment so that during R CMD check, it's not locked # preventing creation of S4 classes env_clone(asNamespace(package)) } } testthat/R/reporter-check.R0000644000176200001440000001002515127554030015353 0ustar liggesusers#' Report results for `R CMD check` #' #' `R CMD check` displays only the last 13 lines of the result, so this #' report is designed to ensure that you see something useful there. 
#' #' @export #' @family reporters CheckReporter <- R6::R6Class( "CheckReporter", inherit = Reporter, public = list( problems = NULL, skips = NULL, warnings = NULL, n_ok = 0L, old_in_check_reporter = NULL, initialize = function(...) { self$capabilities$parallel_support <- TRUE self$problems <- Stack$new() self$warnings <- Stack$new() self$skips <- Stack$new() super$initialize(...) }, add_result = function(context, test, result) { if (expectation_broken(result)) { self$problems$push(result) try(save_test(result$srcref, "_problems"), silent = TRUE) } else if (expectation_warning(result)) { self$warnings$push(result) } else if (expectation_skip(result)) { self$skips$push(result) } else { self$n_ok <- self$n_ok + 1L } }, start_reporter = function() { self$old_in_check_reporter <- in_check_reporter() the$in_check_reporter <- TRUE }, end_reporter = function() { the$in_check_reporter <- self$old_in_check_reporter if (self$skips$size() || self$warnings$size() || self$problems$size()) { self$cat_line(summary_line( n_fail = self$problems$size(), n_warn = self$warnings$size(), n_skip = self$skips$size(), n_pass = self$n_ok )) self$cat_line() } skip_report(self, line = 2) # Don't show warnings in revdep checks in order to focus on failures if (self$warnings$size() > 0 && !on_cran()) { warnings <- self$warnings$as_list() self$rule("Warnings", line = 2) self$cat_line(map_chr(warnings, issue_summary, rule = TRUE)) self$cat_line() } if (self$problems$size() > 0) { problems <- self$problems$as_list() saveRDS(problems, "testthat-problems.rds", version = 2) self$rule("Failed tests", line = 2) self$cat_line(map_chr(problems, issue_summary, rule = TRUE)) self$cat_line() if (some(problems, \(x) isTRUE(attr(x, "snapshot")))) { self$rule("Snapshots", line = 1) self$cat_line(snapshot_check_hint()) } } else { # clean up unlink("testthat-problems.rds") } self$cat_line(summary_line( n_fail = self$problems$size(), n_warn = self$warnings$size(), n_skip = self$skips$size(), n_pass = self$n_ok )) 
} ) ) summary_line <- function(n_fail, n_warn, n_skip, n_pass) { colourise_if <- function(text, colour, cond) { if (cond) colourise(text, colour) else text } # Ordered from most important to least important paste0( "[ ", colourise_if("FAIL", "failure", n_fail > 0), " ", n_fail, " | ", colourise_if("WARN", "warn", n_warn > 0), " ", n_warn, " | ", colourise_if("SKIP", "skip", n_skip > 0), " ", n_skip, " | ", colourise_if("PASS", "success", n_fail == 0), " ", n_pass, " ]" ) } snapshot_check_hint <- function() { intro <- "To review and process snapshots locally:" if (on_gh() && Sys.getenv("GITHUB_JOB") == "R-CMD-check") { repository <- Sys.getenv("GITHUB_REPOSITORY") run_id <- Sys.getenv("GITHUB_RUN_ID") call <- sprintf( "testthat::snapshot_download_gh(\"%s\", \"%s\")", repository, run_id ) copy <- sprintf("* Run `%s` to download snapshots.", call) } else { copy <- c( if (on_ci()) { "* Download and unzip artifact." } else { "* Locate check directory." }, "* Copy 'tests/testthat/_snaps' to local package." ) } action <- c( "* Run `testthat::snapshot_accept()` to accept all changes.", "* Run `testthat::snapshot_review()` to review all changes." ) c(intro, copy, action) } run <- function(x) { cli::format_inline(paste0("{.run testthat::", x, "}")) } testthat/vignettes/0000755000176200001440000000000015130237654014130 5ustar liggesuserstestthat/vignettes/third-edition.Rmd0000644000176200001440000002315115072252215017334 0ustar liggesusers--- title: "testthat 3e" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{testthat 3e} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` testthat 3.0.0 introduces the idea of an "edition" of testthat. An edition is a bundle of behaviours that you have to explicitly choose to use, allowing us to make otherwise backward incompatible changes. 
This is particularly important for testthat since it has a very large number of packages that use it (almost 5,000 at last count). Choosing to use the 3rd edition allows you to use our latest recommendations for ongoing and new work, while historical packages continue to use the old behaviour. (We don't anticipate creating new editions very often, and they'll always be matched with major version, i.e. if there's another edition, it'll be the fourth edition and will come with testthat 4.0.0.) This vignette shows you how to activate the 3rd edition, introduces the main features, and discusses common challenges when upgrading a package. If you have a problem that this vignette doesn't cover, please let me know, as it's likely that the problem also affects others. ```{r, message = FALSE} library(testthat) local_edition(3) ``` ## Activating The usual way to activate the 3rd edition is to add a line to your `DESCRIPTION`: Config/testthat/edition: 3 This will activate the 3rd edition for every test in your package. You can also control the edition used for individual tests with `testthat::local_edition()`: ```{r} test_that("I can use the 3rd edition", { local_edition(3) expect_true(TRUE) }) ``` This is also useful if you've switched to the 3rd edition and have a couple of tests that fail. You can use `local_edition(2)` to revert back to the old behaviour, giving you some breathing room to figure out the underlying issue. ```{r} test_that("I want to use the 2nd edition", { local_edition(2) expect_true(TRUE) }) ``` ## Changes There are three major changes in the 3rd edition: - A number of outdated functions are now **deprecated**, so you'll be warned about them every time you run your tests (but they won't cause `R CMD check` to fail). - testthat no longer silently swallows **messages**; you now need to deliberately handle them. - `expect_equal()` and `expect_identical()` now use the [**waldo**](https://waldo.r-lib.org/) package instead of `identical()` and `all.equal()`. 
This makes them more consistent and provides an enhanced display of differences when a test fails.

### Deprecations

A number of outdated functions have been deprecated. Most of these functions have not been recommended for a number of years, but before the introduction of the edition idea, I didn't have a good way of preventing people from using them without breaking a lot of code on CRAN.

-   `context()` is formally deprecated. testthat has been moving away from `context()` in favour of file names for quite some time, and now you'll be strongly encouraged to remove these calls from your tests.

-   `expect_is()` is deprecated in favour of the more specific `expect_type()`, `expect_s3_class()`, and `expect_s4_class()`. This ensures that you check the expected class along with the expected OO system.

-   The very old `expect_that()` syntax is now deprecated. This was an overly clever API that I regretted even before the release of testthat 1.0.0.

-   `expect_equivalent()` has been deprecated since it is now equivalent (HA HA) to `expect_equal(ignore_attr = TRUE)`.

-   `setup()` and `teardown()` are deprecated in favour of test fixtures. See `vignette("test-fixtures")` for details.

-   `expect_known_output()`, `expect_known_value()`, `expect_known_hash()`, and `expect_equal_to_reference()` are all deprecated in favour of `expect_snapshot_output()` and `expect_snapshot_value()`.

-   `with_mock()` and `local_mock()` are deprecated; please use `with_mocked_bindings()` or `local_mocked_bindings()` instead.

Fixing these deprecation warnings should be straightforward.
### Warnings In the second edition, `expect_warning()` swallows all warnings regardless of whether or not they match the `regexp` or `class`: ```{r} f <- function() { warning("First warning") warning("Second warning") warning("Third warning") } local_edition(2) expect_warning(f(), "First") ``` In the third edition, `expect_warning()` captures at most one warning so the others will bubble up: ```{r} local_edition(3) expect_warning(f(), "First") ``` You can either add additional expectations to catch these warnings, or silence them all with `suppressWarnings()`: ```{r} f() |> expect_warning("First") |> expect_warning("Second") |> expect_warning("Third") f() |> expect_warning("First") |> suppressWarnings() ``` Alternatively, you might want to capture them all in a snapshot test: ```{r} test_that("f() produces expected outputs/messages/warnings", { expect_snapshot(f()) }) ``` The same principle also applies to `expect_message()`, but message handling has changed in a more radical way, as described next. ### Messages For reasons that I can no longer remember, testthat silently ignores all messages. This is inconsistent with other types of output, so as of the 3rd edition, they now bubble up to your test results. You'll have to explicit ignore them with `suppressMessages()`, or if they're important, test for their presence with `expect_message()`. ### waldo Probably the biggest day-to-day difference (and the biggest reason to upgrade!) is the use of [`waldo::compare()`](https://waldo.r-lib.org/reference/compare.html) inside of `expect_equal()` and `expect_identical()`. The goal of waldo is to find and concisely describe the difference between a pair of R objects, and it's designed specifically to help you figure out what's gone wrong in your unit tests. 
```{r, error = TRUE}
f1 <- factor(letters[1:3])
f2 <- ordered(letters[1:3], levels = letters[1:4])

local_edition(2)
expect_equal(f1, f2)

local_edition(3)
expect_equal(f1, f2)
```

waldo looks even better in your console because it carefully uses colours to help highlight the differences.

The use of waldo also makes precise the difference between `expect_equal()` and `expect_identical()`: `expect_equal()` sets `tolerance` so that waldo will ignore small numerical differences arising from floating point computation. Otherwise the functions are identical (HA HA).

This change is likely to result in the most work during an upgrade, because waldo can give slightly different results to both `identical()` and `all.equal()` in moderately common situations. I believe on the whole the differences are meaningful and useful, so you'll need to handle them by tweaking your tests. The following changes are most likely to affect you:

-   `expect_equal()` previously ignored the environments of formulas and functions. This is most likely to arise if you are testing models. It's worth thinking about what the correct values should be, but if that is too annoying you can opt out of the comparison with `ignore_function_env` or `ignore_formula_env`.

-   `expect_equal()` used a combination of `all.equal()` and a home-grown `testthat::compare()` which unfortunately used a slightly different definition of tolerance. Now `expect_equal()` always uses the same definition of tolerance everywhere, which may require tweaks to your existing tolerance values.
- `expect_equal()` previously ignored timezone differences when one object had the current timezone set implicitly (with `""`) and the other had it set explicitly: ```{r, error = TRUE} dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0) attr(dt1, "tzone") <- "" attr(dt2, "tzone") <- Sys.timezone() local_edition(2) expect_equal(dt1, dt2) local_edition(3) expect_equal(dt1, dt2) ``` ### Reproducible output In the third edition, `test_that()` automatically calls `local_reproducible_output()` which automatically sets a number of options and environment variables to ensure output is as reproducible as possible across systems. This includes setting: - `options(crayon.enabled = FALSE)` and `options(cli.unicode = FALSE)` so that the crayon and cli packages produce raw ASCII output. - `Sys.setlocale("LC_COLLATE", "C")` so that sorting a character vector returns the same order regardless of the system language. - `options(width = 80)` so print methods always generate the same output regardless of your actual console width. See the documentation for more details. ## Upgrading The changes lend themselves to the following workflow for upgrading from the 2nd to the 3rd edition: 1. Activate edition 3. You can let [`usethis::use_testthat(3)`](https://usethis.r-lib.org/reference/use_testthat.html) do this for you. 2. Remove or replace deprecated functions, going over the list above. 3. If your output got noisy, quiet things down by either capturing or suppressing warnings and messages. 4. Inspect test outputs if objects are not "all equal" anymore. ## Alternatives You might wonder why we came up with the idea of an "edition", rather than creating a new package like testthat3. We decided against making a new package because the 2nd and 3rd edition share a very large amount of code, so making a new package would have substantially increased the maintenance burden: the majority of bugs would've needed to be fixed in two places. 
If you're a programmer in other languages, you might wonder why we can't rely on [semantic versioning](https://semver.org). The main reason is that CRAN checks all packages that use testthat with the latest version of testthat, so simply incrementing the major version number doesn't actually help with reducing R CMD check failures on CRAN. testthat/vignettes/snapshotting.Rmd0000644000176200001440000003647315067547665017351 0ustar liggesusers--- title: "Snapshot tests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Snapshot tests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) set.seed(1014) ``` The goal of a unit test is to record the expected output of a function using code. This is a powerful technique because it not only ensures that code doesn't change unexpectedly, but it also expresses the desired behavior in a way that a human can understand. However, it's not always convenient to record the expected behavior with code. Some challenges include: - Text output that includes many characters like quotes and newlines that require special handling in a string. - Output that is large, making it painful to define the reference output and bloating the size of the test file. - Binary formats like plots or images, which are very difficult to describe in code: e.g., the plot looks right, the error message is actionable, or the print method uses color effectively. For these situations, testthat provides an alternative mechanism: snapshot tests. Instead of using code to describe expected output, snapshot tests (also known as [golden tests](https://ro-che.info/articles/2017-12-04-golden-tests)) record results in a separate human-readable file. Snapshot tests in testthat are inspired primarily by [Jest](https://jestjs.io/docs/en/snapshot-testing), thanks to a number of very useful discussions with Joe Cheng. 
```{r setup} library(testthat) ``` ```{r include = FALSE} snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") ``` ## Basic workflow We'll illustrate the basic workflow with a simple function that generates HTML bullets. It can optionally include an `id` attribute, which allows you to construct a link directly to that list. ```{r} bullets <- function(text, id = NULL) { paste0( "\n", paste0("
  • ", text, "
  • \n", collapse = ""), "\n" ) } cat(bullets("a", id = "x")) ``` Testing this simple function is relatively painful. To write the test you have to carefully escape the newlines and quotes. And then when you re-read the test in the future, all that escaping makes it hard to tell exactly what it's supposed to return. ```{r} test_that("bullets", { expect_equal(bullets("a"), "
      \n
    • a
    • \n
    \n") expect_equal(bullets("a", id = "x"), "
      \n
    • a
    • \n
    \n") }) ``` This is a great place to use snapshot testing. To do this we make two changes to our code: - We use `expect_snapshot()` instead of `expect_equal()` - We wrap the call in `cat()` (to avoid `[1]` in the output, like in the first interactive example above). This yields the following test: ```{r} test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` ```{r, include = FALSE} # Reset snapshot test snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ``` When we run the test for the first time, it automatically generates reference output and prints it, so that you can visually confirm that it's correct. The output is automatically saved in `_snaps/{name}.md`. The name of the snapshot matches your test file name --- e.g. if your test is `test-pizza.R` then your snapshot will be saved in `tests/testthat/_snaps/pizza.md`. As the file name suggests, this is a markdown file, which I'll explain shortly. If you run the test again, it'll succeed: ```{r} test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` ```{r} #| include: false # finalise snapshot to in order to get an error snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ``` But if you change the underlying code, say to tweak the indenting, the test will fail: ```{r, error = TRUE} bullets <- function(text, id = NULL) { paste0( "\n", paste0("
  • ", text, "
  • \n", collapse = ""), "\n" ) } test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` If this is a deliberate change, you can follow the advice in the message and update the snapshots for that file by running `snapshot_accept("pizza")`; otherwise, you can fix the bug and your tests will pass once more. (You can also accept snapshots for all files with `snapshot_accept()`.) If you delete the test, the corresponding snapshot will be removed the next time you run the tests. If you delete all snapshots in the file, the entire snapshot file will be deleted the next time you run all the tests. ### Snapshot format Snapshots are recorded using a subset of markdown. You might wonder why we use markdown. We use it because it's important that snapshots be human-readable because humans have to read them during code reviews. Reviewers often don't run your code but still want to understand the changes. Here's the snapshot file generated by the test above: ``` md # bullets
    • a
    ---
    • a
    ``` Each test starts with `# {test name}`, a level 1 heading. Within a test, each snapshot expectation is indented by four spaces, i.e., as code, and they are separated by `---`, a horizontal rule. ### Interactive usage Because the snapshot output uses the name of the current test file and the current test, snapshot expectations don't really work when run interactively at the console. Since they can't automatically find the reference output, they instead just print the current value for manual inspection. ## Testing errors So far we've focused on snapshot tests for output printed to the console. But `expect_snapshot()` also captures messages, errors, and warnings[^1]. Messages and warnings are straightforward, but capturing errors is *slightly* more difficult because `expect_snapshot()` will fail if there's an error: ```{r} #| error: true test_that("you can't add a number and a letter", { expect_snapshot(1 + "a") }) ``` This is a safety valve that ensures that you don't accidentally write broken code. To deliberately snapshot an error, you'll have to specifically request it with `error = TRUE`: ```{r} test_that("you can't add a number and a letter", { expect_snapshot(1 + "a", error = TRUE) }) ``` When the code gets longer, I like to put `error = TRUE` up front so it's a little more obvious: ```{r} test_that("you can't add weird things", { expect_snapshot(error = TRUE, { 1 + "a" mtcars + iris Sys.Date() + factor() }) }) ``` Just be careful: when you set `error = TRUE`, `expect_snapshot()` checks that at least one expression throws an error, not that every expression throws an error. For example, look above and notice that adding a date and a factor generated a warning, not an error. Snapshot tests are particularly important when testing complex error messages, such as those that you might generate with cli. Here's a more realistic example illustrating how you might test `check_unnamed()`, a function that ensures all arguments in `...` are unnamed. 
```{r} check_unnamed <- function(..., call = parent.frame()) { names <- ...names() has_name <- names != "" if (!any(has_name)) { return(invisible()) } named <- names[has_name] cli::cli_abort( c( "All elements of {.arg ...} must be unnamed.", i = "You supplied argument{?s} {.arg {named}}." ), call = call ) } test_that("no errors if all arguments unnamed", { expect_no_error(check_unnamed()) expect_no_error(check_unnamed(1, 2, 3)) }) test_that("actionable feedback if some or all arguments named", { expect_snapshot(error = TRUE, { check_unnamed(x = 1, 2) check_unnamed(x = 1, y = 2) }) }) ``` ## Other challenges ### Varying outputs Sometimes part of the output varies in ways that you can't easily control. In many cases, it's convenient to use mocking (`vignette("mocking")`) to ensure that every run of the function always produces the same output. In other cases, it's easier to manipulate the text output with a regular expression or similar. That's the job of the `transform` argument, which should be passed a function that takes a character vector of lines and returns a modified vector. This type of problem often crops up when you are testing a function that gives feedback about a path. In your tests, you'll typically use a temporary path (e.g., from `withr::local_tempfile()`), so if you display the path in a snapshot, it will be different every time. 
For example, consider this "safe" version of `writeLines()` that requires you to explicitly opt in to overwriting an existing file: ```{r} safe_write_lines <- function(lines, path, overwrite = FALSE) { if (file.exists(path) && !overwrite) { cli::cli_abort(c( "{.path {path}} already exists.", i = "Set {.code overwrite = TRUE} to overwrite" )) } writeLines(lines, path) } ``` If you use a snapshot test to confirm that the error message is useful, the snapshot will be different every time the test is run: ```{r} #| include: false snapper$end_file() snapper$start_file("snapshotting.Rmd", "safe-write-lines") ``` ```{r} test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot(safe_write_lines(letters, path), error = TRUE) }) ``` ```{r} #| include: false snapper$end_file() snapper$start_file("snapshotting.Rmd", "safe-write-lines") ``` ```{r} #| error: true test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot(safe_write_lines(letters, path), error = TRUE) }) ``` ```{r} #| include: false snapper$end_file() snapper$start_file("snapshotting.Rmd", "test-2") ``` One way to fix this problem is to use the `transform` argument to replace the temporary path with a fixed value: ```{r} test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot( safe_write_lines(letters, path), error = TRUE, transform = \(lines) gsub(path, "", lines, fixed = TRUE) ) }) ``` Now even though the path varies, the snapshot does not. ### `local_reproducible_output()` By default, testthat sets a number of options that simplify and standardize output: * The console width is set to 80. * {cli} ANSI coloring and hyperlinks are suppressed. * Unicode characters are suppressed. These are sound defaults that we have found useful to minimize spurious differences between tests run in different environments. 
However, there are times when you want to deliberately test different widths, ANSI escapes, or Unicode characters, so you can override the defaults with `local_reproducible_output()`. ### Snapshotting graphics If you need to test graphical output, use {vdiffr}. vdiffr is used to test ggplot2 and incorporates everything we know about high-quality graphics tests that minimize false positives. Graphics testing is still often fragile, but using vdiffr means you will avoid all the problems we know how to avoid. ### Snapshotting values `expect_snapshot()` is the most used snapshot function because it records everything: the code you run, printed output, messages, warnings, and errors. If you care about the return value rather than any side effects, you might want to use `expect_snapshot_value()` instead. It offers a number of serialization approaches that provide a tradeoff between accuracy and human readability. ```{r} test_that("can snapshot a simple list", { x <- list(a = list(1, 5, 10), b = list("elephant", "banana")) expect_snapshot_value(x) }) ``` ## Whole file snapshotting `expect_snapshot()`, `expect_snapshot_output()`, `expect_snapshot_error()`, and `expect_snapshot_value()` use one snapshot file per test file. But that doesn't work for all file types—for example, what happens if you want to snapshot an image? `expect_snapshot_file()` provides an alternative workflow that generates one snapshot per expectation, rather than one file per test. Assuming you're in `test-burger.R`, then the snapshot created by `expect_snapshot_file(code_that_returns_path_to_file(), "toppings.png")` would be saved in `tests/testthat/_snaps/burger/toppings.png`. If a future change in the code creates a different file, it will be saved in `tests/testthat/_snaps/burger/toppings.new.png`. Unlike `expect_snapshot()` and friends, `expect_snapshot_file()` can't provide an automatic diff when the test fails. Instead, you'll need to call `snapshot_review()`. 
This launches a Shiny app that allows you to visually review each change and approve it if it's deliberate: ```{r} #| echo: false #| fig-alt: Screenshot of the Shiny app for reviewing snapshot #| changes to images. It shows the changes to a png file of #| a plot created in a snapshot test. There is a button #| to accept the changed snapshot, or to skip it. knitr::include_graphics("review-image.png") ``` ```{r} #| echo: false #| fig-alt: Screenshot of the Shiny app for reviewing snapshot #| changes to text files. It shows the changes to a .R file #| created in a snapshot test, where a line has been removed. #| There is a button to accept the changed snapshot, or to skip it. knitr::include_graphics("review-text.png") ``` The display varies based on the file type (currently text files, common image files, and csv files are supported). Sometimes the failure occurs in a non-interactive environment where you can't run `snapshot_review()`, e.g., in `R CMD check`. In this case, the easiest fix is to retrieve the `.new` file, copy it into the appropriate directory, and then run `snapshot_review()` locally. If this happens on GitHub, testthat provides some tools to help you in the form of `gh_download_artifact()`. In most cases, we don't expect you to use `expect_snapshot_file()` directly. Instead, you'll use it via a wrapper that does its best to gracefully skip tests when differences in platform or package versions make it unlikely to generate perfectly reproducible output. That wrapper should also typically call `announce_snapshot_file()` to avoid snapshots being incorrectly cleaned up—see the documentation for more details. ## Previous work This is not the first time that testthat has attempted to provide snapshot testing (although it's the first time I knew what other languages called them). This section describes some of the previous attempts and why we believe the new approach is better. 
- `verify_output()` has three main drawbacks: - You have to supply a path where the output will be saved. This seems like a small issue, but thinking of a good name, and managing the difference between interactive and test-time paths introduces a surprising amount of friction. - It always overwrites the previous result, automatically assuming that the changes are correct. That means you have to use it with git, and it's easy to accidentally accept unwanted changes. - It's relatively coarse grained, which means tests that use it tend to keep growing and growing. - `expect_known_output()` is a finer-grained version of `verify_output()` that captures output from a single function. The requirement to produce a path for each individual expectation makes it even more painful to use. - `expect_known_value()` and `expect_known_hash()` have all the disadvantages of `expect_known_output()`, but also produce binary output, meaning that you can't easily review test differences in pull requests. testthat/vignettes/skipping.Rmd0000644000176200001440000001324715067547665016446 0ustar liggesusers--- title: "Skipping tests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Skipping tests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` Sometimes you have tests that you can't or don't want to run in certain circumstances. This vignette describes how to **skip** tests to avoid execution in undesired environments. The most common scenarios are: - You're testing a web service that occasionally fails, and you don't want to run the tests on CRAN. Or the API requires authentication, and you can only run the tests when you've [securely distributed](https://gargle.r-lib.org/articles/articles/managing-tokens-securely.html) secrets. 
- You're relying on features that not all operating systems possess, and want to make sure your code doesn't run on platforms where it doesn't work. The most common platform with limitations is Windows, which among other things lacks full UTF-8 support. - You're writing tests for multiple versions of R or multiple versions of a dependency, and you want to skip when a feature isn't available. You generally don't need to skip tests if a suggested package is not installed. This is only needed in exceptional circumstances, e.g., when a package is not available on some operating systems. ```{r setup} library(testthat) ``` ## Basics testthat comes with a variety of helpers for the most common situations: - `skip_if_not_installed()` skips if a required package is not installed. You can optionally supply a minimal version too. - `skip_on_cran()` skips tests on CRAN. `skip_on_bioc()` skips tests on Bioconductor. This is useful for slow tests and tests that occasionally fail for reasons outside of your control. - `skip_on_os()` allows you to skip tests on a specific operating system. Generally, you should strive to avoid this as much as possible (so your code works the same on all platforms), but sometimes it's just not possible. - `skip_on_ci()` skips tests on most CI platforms (e.g., GitHub Actions). - `skip_on_covr()` skips tests during code coverage. - `skip_unless_r(">= 4.2")` only runs tests for newer R versions. `skip_unless_r("< 4.2")` only runs tests for older R versions. You can implement your own skips using `skip_if()` or `skip_if_not()`: ```{r} #| eval: false # Only run test if a token file is available skip_if_not(file.exists("secure-token.json")) # Only run test if R has memory profiling capabilities skip_if_not(capabilities("profmem")) # Only run if we've opted-in to slow tests with an env var skip_if(Sys.getenv("RUN_SLOW_TESTS") == "true") ``` All reporters show which tests are skipped. 
As of testthat 3.0.0, ProgressReporter (used interactively) and CheckReporter (used inside `R CMD check`) also display a summary of skips across all tests. It looks something like this: ``` ── Skipped tests ─────────────────────────────────────────────────────── ● No token (3) ● On CRAN (1) ``` This display is really important, and you should keep an eye on it when working on your test suite. If you accidentally skip too many tests, you can trick yourself into believing your code is working correctly, when actually you're just not testing it. ## Helpers If you find yourself using the same `skip_if()` or `skip_if_not()` expression across multiple tests, it's a good idea to create a helper function. This function should start with `skip_` and live in a `tests/testthat/helper-{something}.R` file: ```{r} skip_if_dangerous <- function() { if (!identical(Sys.getenv("DANGER"), "")) { skip("Not run in dangerous environments.") } else { invisible() } } ``` ## Embedding `skip()` in package functions Another useful technique is to embed a `skip()` directly into a package function. For example, take a look at [`pkgdown:::convert_markdown_to_html()`](https://github.com/r-lib/pkgdown/blob/v2.0.7/R/markdown.R#L95-L106), which absolutely cannot work if the Pandoc tool is unavailable: ```{r eval = FALSE} convert_markdown_to_html <- function(in_path, out_path, ...) { if (rmarkdown::pandoc_available("2.0")) { from <- "markdown+gfm_auto_identifiers-citations+emoji+autolink_bare_uris" } else if (rmarkdown::pandoc_available("1.12.3")) { from <- "markdown_github-hard_line_breaks+tex_math_dollars+tex_math_single_backslash+header_attributes" } else { if (is_testing()) { testthat::skip("Pandoc not available") } else { abort("Pandoc not available") } } ... } ``` If Pandoc is not available when `convert_markdown_to_html()` executes, it throws an error *unless* it appears to be part of a test run, in which case the test is skipped. 
This is an alternative to implementing a custom skipper, e.g., `skip_if_no_pandoc()`, and inserting it into many of pkgdown's tests. We don't want pkgdown to have a runtime dependency on testthat, so pkgdown includes a copy of `testthat::is_testing()`: ```{r} is_testing <- function() { identical(Sys.getenv("TESTTHAT"), "true") } ``` It might look like the code still has a runtime dependency on testthat, because of the call to `testthat::skip()`. But `testthat::skip()` is only executed during a test run, which means that testthat is installed. We have mixed feelings about this approach. On the one hand, it feels elegant and concise, and it absolutely guarantees that you'll never miss a needed skip in one of your tests. On the other hand, it mixes code and tests in an unusual way, and when you're focused on the tests, it's easy to miss the fact that a package function contains a `skip()`. testthat/vignettes/custom-expectation.Rmd0000644000176200001440000003024515111027202020414 0ustar liggesusers--- title: "Custom expectations" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Custom expectations} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup} #| include: false library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") ``` This vignette shows you how to write your own expectations. Custom expectations allow you to extend testthat to meet your own specialized testing needs, creating new `expect_*` functions that work exactly the same way as the built-ins. Custom expectations are particularly useful if you want to produce expectations tailored for domain-specific data structures, combine multiple checks into a single expectation, or create more actionable feedback when an expectation fails. 
You can use them within your package by putting them in a helper file, or share them with others by exporting them from your package. In this vignette, you'll learn about the three-part structure of expectations, how to test your custom expectations, see a few examples, and, if you're writing a lot of expectations, learn how to reduce repeated code. ## Do you need it? But before you read the rest of the vignette and dive into the full details of creating a 100% correct expectation, consider if you can get away with a simpler wrapper. If you're just customising an existing expectation by changing some defaults, you're fine: ```{r} expect_df <- function(tbl) { expect_s3_class(tbl, "data.frame") } ``` If you're combining multiple expectations, you can introduce a subtle problem. For example, take this expectation from tidytext: ```{r} # from tidytext expect_nrow <- function(tbl, n) { expect_s3_class(tbl, "data.frame") expect_equal(nrow(tbl), n) } ``` If we use it in a test you can see there's an issue: ```{r} #| error: true test_that("success", { expect_nrow(mtcars, 32) }) test_that("failure 1", { expect_nrow(mtcars, 30) }) test_that("failure 2", { expect_nrow(matrix(1:5), 2) }) ``` Each of these tests contain a single expectation, but they report a total of two successes and failures. It would be confusing if testthat didn't report these numbers correctly. But as a helper in your package, it's probably not a big deal. You might also notice that these failures generate a backtrace whereas built-in expectations don't. Again, it's not a big deal because the backtrace is correct, it's just not needed. These are both minor issues, so if they don't bother you, you can save yourself some pain by not reading this vignette 😀. ## Expectation basics An expectation has four main parts, as illustrated by `expect_length()`: ```{r} expect_length <- function(object, n) { # 1. 
Capture object and label act <- quasi_label(rlang::enquo(object)) act_n <- length(act$val) if (act_n != n) { # 2. Fail if expectations are violated fail(c( sprintf("Expected %s to have length %i.", act$lab, n), sprintf("Actual length: %i.", act_n) )) } else { # 3. Pass if expectations are met pass() } # 4. Invisibly return the input value invisible(act$val) } ``` The first step in any expectation is to use `quasi_label()` to capture a "labeled value", i.e., a list that contains both the value (`$val`) for testing and a label (`$lab`) used to make failure messages as informative as possible. This is a pattern that exists for fairly esoteric reasons; you don't need to understand it, just copy and paste it. Next you need to check each way that `object` could violate the expectation. In this case, there's only one check, but in more complicated cases there can be many. Note the specific form of the failure message: the first element describes what we expected, and then the second line reports what we actually saw. If the object is as expected, call `pass()`. This ensures that a success will be registered in the test reporter. Otherwise, call `fail()`. This ensures that a failure will be registered in the test reporter. NB: unlike `stop()` or `abort()`, `fail()` signals a failure but allows code execution to continue, ensuring that one failure does not prevent subsequent expectations from running. Finally, return the input value (`act$val`) invisibly. 
This is good practice because expectations are called primarily for their side-effects (triggering a failure), and returning the value allows expectations to be piped together: ```{r} #| label: piping test_that("mtcars is a 13 row data frame", { mtcars |> expect_type("list") |> expect_s3_class("data.frame") |> expect_length(11) }) ``` ### Testing your expectations Once you've written your expectation, you need to test it: expectations are functions that can have bugs, just like any other function, and it's really important that they generate actionable failure messages. Luckily testthat comes with three expectations designed specifically to test expectations: * `expect_success()` checks that your expectation emits exactly one success and zero failures. * `expect_failure()` checks that your expectation emits exactly one failure and zero successes. * `expect_snapshot_failure()` captures the failure message in a snapshot, making it easier to review whether it's useful. The first two expectations are particularly important because they ensure that your expectation always reports either a single success or a single failure. If it doesn't, the end user is going to get confusing results in their test suite reports. ```{r} test_that("expect_length works as expected", { x <- 1:10 expect_success(expect_length(x, 10)) expect_failure(expect_length(x, 11)) }) test_that("expect_length gives useful feedback", { x <- 1:10 expect_snapshot_failure(expect_length(x, 11)) }) ``` ## Examples The following sections show you a few more variations, loosely based on existing testthat expectations. These expectations were picked to show how you can generate actionable failures in slightly more complex situations. ### `expect_vector_length()` Let's make `expect_length()` a bit more strict by also checking that the input is a vector. 
R is a bit unusual in that it gives a length to pretty much every object, and you can imagine not wanting code like the following to succeed, because it's likely that the user passed the wrong object to the test. ```{r} expect_length(mean, 1) ``` To do this we'll add an extra check that the input is either an atomic vector or a list: ```{r} expect_vector_length <- function(object, n) { act <- quasi_label(rlang::enquo(object)) # It's non-trivial to check if an object is a vector in base R so we # use an rlang helper if (!rlang::is_vector(act$val)) { fail(c( sprintf("Expected %s to be a vector", act$lab), sprintf("Actual type: %s", typeof(act$val)) )) } else { act_n <- length(act$val) if (act_n != n) { fail(c( sprintf("Expected %s to have length %i.", act$lab, n), sprintf("Actual length: %i.", act_n) )) } else { pass() } } invisible(act$val) } ``` ```{r} #| error: true expect_vector_length(mean, 1) expect_vector_length(mtcars, 15) ``` ### `expect_s3_class()` Or imagine you're checking to see if an object inherits from an S3 class. R has a lot of different OO systems, and you want your failure messages to be as informative as possible, so before checking that the class matches, you probably want to check that the object is from the correct OO family. 
```{r} expect_s3_class <- function(object, class) { if (!rlang::is_string(class)) { rlang::abort("`class` must be a string.") } act <- quasi_label(rlang::enquo(object)) if (!is.object(act$val)) { fail(sprintf("Expected %s to be an object.", act$lab)) } else if (isS4(act$val)) { fail(c( sprintf("Expected %s to be an S3 object.", act$lab), "Actual OO type: S4" )) } else if (!inherits(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, class), sprintf("Actual class: %s", class(act$val)) )) } else { pass() } invisible(act$val) } ``` ```{r} #| error: true x1 <- 1:10 TestClass <- methods::setClass("Test", contains = "integer") x2 <- TestClass() x3 <- factor() expect_s3_class(x1, "integer") expect_s3_class(x2, "integer") expect_s3_class(x3, "integer") expect_s3_class(x3, "factor") ``` Note the variety of error messages. We always print what was expected, and where possible, also display what was actually received: * When `object` isn't an object, we can only say what we expected. * When `object` is an S4 object, we can report that. * When `inherits()` is `FALSE`, we provide the actual class, since that's most informative. The general principle is to tailor error messages to what the user can act on based on what you know about the input. Also note that I check that the `class` argument is a string. If it's not a string, I throw an error. This is not a test failure; the user is calling the function incorrectly. In general, you should check the type of all arguments that affect the operation and error if they're not what you expect. ```{r} #| error: true expect_s3_class(x1, 1) ``` ### Optional `class` A common pattern in testthat's own expectations is to use arguments to control the level of detail in the test. Here it would be nice if we could check that an object is an S3 object without checking for a specific class. I think we could do that by renaming `expect_s3_class()` to `expect_s3_object()`. 
Now `expect_s3_object(x)` would verify that `x` is an S3 object, and `expect_s3_object(x, class = "foo")` to verify that `x` is an S3 object with the given class. The implementation of this is straightforward: we also allow `class` to be `NULL` and then only verify inheritance when non-`NULL`. ```{r} expect_s3_object <- function(object, class = NULL) { if (!rlang::is_string(class) && is.null(class)) { rlang::abort("`class` must be a string or NULL.") } act <- quasi_label(rlang::enquo(object)) if (!is.object(act$val)) { fail(sprintf("Expected %s to be an object.", act$lab)) } else if (isS4(act$val)) { fail(c( sprintf("Expected %s to be an S3 object.", act$lab), "Actual OO type: S4" )) } else if (!is.null(class) && !inherits(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, class), sprintf("Actual class: %s", class(act$val)) )) } else { pass() } invisible(act$val) } ``` ## Repeated code As you write more expectations, you might discover repeated code that you want to extract into a helper. Unfortunately, creating 100% correct helper functions is not straightforward in testthat because `fail()` captures the calling environment in order to give useful tracebacks, and testthat's own expectations don't expose this as an argument. Fortunately, getting this right is not critical (you'll just get a slightly suboptimal traceback in the case of failure), so we don't recommend bothering in most cases. We document it here, however, because it's important to get it right in testthat itself. The key challenge is that `fail()` captures a `trace_env`, which should be the execution environment of the expectation. This usually works because the default value of `trace_env` is `rlang::caller_env()`. 
But when you introduce a helper, you'll need to explicitly pass it along: ```{r} expect_length_ <- function(act, n, trace_env = caller_env()) { act_n <- length(act$val) if (act_n != n) { fail( sprintf("%s has length %i, not length %i.", act$lab, act_n, n), trace_env = trace_env ) } else { pass() } } expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object)) expect_length_(act, n) invisible(act$val) } ``` A few recommendations: * The helper shouldn't be user-facing, so we give it a `_` suffix to make that clear. * It's typically easiest for a helper to take the labeled value produced by `quasi_label()`. * Your helper should usually be called for its side effects (i.e. it calls `fail()` and `pass()`). * You should return `invisible(act$val)` from the parent expecatation as usual. Again, you're probably not writing so many expectations that it makes sense for you to go to this effort, but it is important for testthat to get it right. testthat/vignettes/test-fixtures.Rmd0000644000176200001440000004031715070045757017433 0ustar liggesusers--- title: "Test fixtures" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Test fixtures} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Test hygiene > Take nothing but memories, leave nothing but footprints. > > ― Chief Si'ahl Ideally, a test should leave the world exactly as it found it. But you often need to make changes to exercise every part of your code: - Create a file or directory - Create a resource on an external system - Set an R option - Set an environment variable - Change working directory - Change an aspect of the tested package's state How can you clean up these changes to get back to a clean slate? Scrupulous attention to cleanup is more than just courtesy or being fastidious. It's also self-serving. The state of the world after test `i` is the starting state for test `i + 1`. 
Tests that change state willy-nilly eventually end up interfering with each other in ways that can be very difficult to debug. Most tests are written with an implicit assumption about the starting state, usually whatever *tabula rasa* means for the target domain of your package. If you accumulate enough sloppy tests, you will eventually find yourself asking the programming equivalent of questions like "Who forgot to turn off the oven?" and "Who didn't clean up after the dog?" (If you've got yourself into this state, testthat provides another tool to help you figure out exactly which test is to blame: `set_state_inspector()`.) It's also important that your setup and cleanup are easy to use when working interactively. When a test fails, you want to be able to quickly recreate the exact environment in which the test is run so you can interactively experiment to figure out what went wrong. This article introduces a powerful technique that allows you to solve both problems: **test fixtures**. We'll begin by discussing some canned tools, then learn about the underlying theory, discuss exactly what a test fixture is, and finish with a few examples. ```{r} library(testthat) ``` ## `local_` helpers We'll begin by giving you the minimal knowledge needed to change global state *just* within your test. The withr package provides a number of functions that temporarily change the state of the world, carefully undoing the changes when the current function or test finishes: | Do / undo this | withr function | |-----------------------------|-------------------| | Create a file | `local_tempfile()`| | Create a directory | `local_tempdir()` | | Set an R option | `local_options()` | | Set an environment variable | `local_envvar()` | | Change working directory | `local_dir()` | (You can see a full list at but these five are by far the most commonly used.) These allow you to control options that would otherwise be painful. 
For example, imagine you're testing base R code that rounds numbers to a fixed number of places when printing. You could write code like this: ```{r} test_that("print() respects digits option", { x <- 1.23456789 withr::local_options(digits = 1) expect_equal(capture.output(x), "[1] 1") withr::local_options(digits = 5) expect_equal(capture.output(x), "[1] 1.2346") }) ``` If you write a lot of code like this in your tests, you might decide you want a helper function or **test fixture** that reduces the duplication. Fortunately withr's local functions allow us to solve this problem by providing an `.local_envir` or `envir` argument that controls when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there's a common pattern you can use without understanding all the details. Your helper function should always have an `env` argument that defaults to `parent.frame()`, which you pass to the `.local_envir` argument of `local_()`: ```{r} local_digits <- function(sig_digits, env = parent.frame()) { withr::local_options(digits = sig_digits, .local_envir = env) # mark that this function is called for its side-effects not its return value invisible() } ``` ## Foundations Before we go further, let's lay some foundations to help you understand how `local_` functions work. We'll motivate the discussion with a `sloppy()` function that prints a number with a specific number of significant digits by adjusting an R option: ```{r include = FALSE} op <- options() ``` ```{r} sloppy <- function(x, sig_digits) { options(digits = sig_digits) print(x) } pi sloppy(pi, 2) pi ``` ```{r include = FALSE} options(op) ``` Notice how `pi` prints differently before and after the call to `sloppy()`. Calling `sloppy()` has a side effect: it changes the `digits` option globally, not just within its own scope. This is what we want to avoid[^1]. [^1]: Don't worry, I'm restoring global state (specifically, the `digits` option) behind the scenes here. 
### `on.exit()` The first function you need to know about is base R's `on.exit()`. `on.exit()` calls the code supplied to its first argument when the current function exits, regardless of whether it returns a value or throws an error. You can use `on.exit()` to clean up after yourself by ensuring that every mess-making function call is paired with an `on.exit()` call that cleans up. We can use this idea to turn `sloppy()` into `neat()`: ```{r} neat <- function(x, sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) print(x) } pi neat(pi, 2) pi ``` Here we make use of a useful pattern that `options()` implements: when you call `options(digits = sig_digits)`, it both sets the `digits` option *and* (invisibly) returns the previous value of digits. We can then use that value to restore the previous options. `on.exit()` also works in tests: ```{r} test_that("can print one digit of pi", { op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) expect_output(print(pi), "3") }) pi ``` There are three main drawbacks to `on.exit()`: - You should always call it with `add = TRUE` and `after = FALSE`. These ensure that the call is **added** to the list of deferred tasks (instead of replacing them) and is added to the **front** of the stack (not the back), so that cleanup occurs in reverse order to setup. These arguments only matter if you're using multiple `on.exit()` calls, but it's a good habit to always use them to avoid potential problems down the road. - It doesn't work outside a function or test. If you run the following code in the global environment, you won't get an error, but the cleanup code will never be run: ```{r, eval = FALSE} op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) ``` This is annoying when you are running tests interactively. 
- You can't program with it; `on.exit()` always works inside the *current* function, so you can't wrap up repeated `on.exit()` code in a helper function. To resolve these drawbacks, we use `withr::defer()`. ### `withr::defer()` `withr::defer()` resolves the main drawbacks of `on.exit()`. First, it has the behavior we want by default; no extra arguments needed: ```{r} neat <- function(x, sig_digits) { op <- options(digits = sig_digits) withr::defer(options(op)) print(x) } ``` Second, it works when called in the global environment. Since the global environment isn't perishable, like a test environment is, you have to call `deferred_run()` explicitly to execute the deferred events. You can also clear them, without running, with `deferred_clear()`. ```{r, eval = FALSE} withr::defer(print("hi")) #> Setting deferred event(s) on global environment. #> * Execute (and clear) with `deferred_run()`. #> * Clear (without executing) with `deferred_clear()`. withr::deferred_run() #> [1] "hi" ``` Finally, `withr::defer()` lets you pick which function to bind the cleanup behavior to. This makes it possible to create helper functions. ### "Local" helpers Imagine we have many functions where we want to temporarily set the digits option. Wouldn't it be nice if we could write a helper function to automate this? Unfortunately, we can't write a helper with `on.exit()`: ```{r} local_digits <- function(sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) } neater <- function(x, sig_digits) { local_digits(1) print(x) } neater(pi) ``` This code doesn't work because the cleanup happens too soon, when `local_digits()` exits, not when `neater()` finishes. Fortunately, `withr::defer()` allows us to solve this problem by providing an `envir` argument that allows you to control when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there's a common pattern you can use without understanding all the details. 
Your helper function should always have an `env` argument that defaults to `parent.frame()`, which you pass to the second argument of `defer()`: ```{r} local_digits <- function(sig_digits, env = parent.frame()) { op <- options(digits = sig_digits) withr::defer(options(op), env) } neater(pi) ``` Just like `on.exit()` and `defer()`, our helper also works within tests: ```{r} test_that("withr lets us write custom helpers for local state manipulation", { local_digits(1) expect_output(print(exp(1)), "3") local_digits(3) expect_output(print(exp(1)), "2.72") }) print(exp(1)) ``` We always call these helper functions `local_*`; "local" here refers to the fact that the state change persists only locally, for the lifetime of the associated function or test. Another reason we call them "local" is that you can also use the `local()` function if you want to scope their effect to a smaller part of the test: ```{r} test_that("local_options() only affects a minimal amount of code", { withr::local_options(x = 1) expect_equal(getOption("x"), 1) local({ withr::local_options(x = 2) expect_equal(getOption("x"), 2) }) expect_equal(getOption("x"), 1) }) getOption("x") ``` ## Test fixtures Testing is often demonstrated with cute little tests and functions where all the inputs and expected results can be inlined. But in real packages, things aren't always so simple, and functions often depend on global state. For example, take this variant on `message()` that only shows a message if the `verbose` option is `TRUE`. How would you test that setting the option does indeed silence the message? ```{r} message2 <- function(...) { if (!isTRUE(getOption("verbose"))) { return() } message(...) } ``` In some cases, it's possible to make the global state an explicit argument to the function. For example, we could refactor `message2()` to make the verbosity an explicit argument: ```{r} message3 <- function(..., verbose = getOption("verbose")) { if (!isTRUE(verbose)) { return() } message(...) 
} ``` Making external state explicit is often worthwhile because it makes clearer exactly what inputs determine the outputs of your function. But it's simply not possible in many cases. That's where test fixtures come in: they allow you to temporarily change global state to test your function. Test fixture is a pre-existing term in the software engineering world (and beyond): > A test fixture is something used to consistently test some item, device, or piece of software. > > --- [Wikipedia](https://en.wikipedia.org/wiki/Test_fixture) A **test fixture** is just a `local_*` function that you use to change state in such a way that you can reach inside and test parts of your code that would otherwise be challenging. For example, here's how you could use `withr::local_options()` as a test fixture to test `message2()`: ```{r} test_that("message2() output depends on verbose option", { withr::local_options(verbose = TRUE) expect_message(message2("Hi!")) withr::local_options(verbose = FALSE) expect_message(message2("Hi!"), NA) }) ``` ### Case study: usethis One place that we use test fixtures extensively is in the usethis package ([usethis.r-lib.org](https://usethis.r-lib.org)), which provides functions for looking after the files and folders in R projects, especially packages. Many of these functions only make sense in the context of a package, which means to test them, we also have to be working inside an R package. We need a way to quickly spin up a minimal package in a temporary directory, then test some functions against it, then destroy it. 
To solve this problem we create a test fixture, which we place in `R/test-helpers.R` so that it's available for both testing and interactive experimentation: ```{r, eval = FALSE} local_create_package <- function(dir = file_temp(), env = parent.frame()) { old_project <- proj_get_() # create new folder and package create_package(dir, open = FALSE) # A withr::defer(fs::dir_delete(dir), envir = env) # -A # change working directory withr::local_dir(dir, .local_envir = env) # B + -B # switch to new usethis project proj_set(dir) # C withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C dir } ``` Note that the cleanup automatically unfolds in the opposite order from the setup. Setup is `A`, then `B`, then `C`; cleanup is `-C`, then `-B`, then `-A`. This is important because we must create directory `dir` before we can make it the working directory, and we must restore the original working directory before we can delete `dir`—we can't delete `dir` while it's still the working directory! `local_create_package()` is used in over 170 tests. Here's one example that checks that `usethis::use_roxygen_md()` does the setup necessary to use roxygen2 in a package, with markdown support turned on. All 3 expectations consult the DESCRIPTION file, directly or indirectly. So it's very convenient that `local_create_package()` creates a minimal package, with a valid `DESCRIPTION` file, for us to test against. And when the test is done --- poof! --- the package is gone. ```{r eval = FALSE} test_that("use_roxygen_md() adds DESCRIPTION fields", { pkg <- local_create_package() use_roxygen_md() expect_true(uses_roxygen_md()) expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)") expect_true(desc::desc_has_fields("RoxygenNote", pkg)) }) ``` ## Scope So far we have applied our test fixture to individual tests, but it's also possible to apply them to a file or package. 
### File If you move the `local_*()` call outside of a `test_that()` block, it will affect all tests that come after it. This means that by calling the test fixture at the top of the file, you can change the behavior for all tests. This has both advantages and disadvantages: - If you would otherwise have called the fixture in every test, you've saved yourself a bunch of work and duplicate code. - On the downside, if your test fails and you want to recreate the failure in an interactive environment so you can debug, you need to remember to run all the setup code at the top of the file first. Generally, I think it's better to copy and paste test fixtures across many tests --- sure, it adds some duplication to your code, but it makes debugging test failures so much easier. ### Package To run code before any test is run, you can create a file called `tests/testthat/setup.R`. If the code in this file needs clean up, you can use the special `teardown_env()`: ```{r, eval = FALSE} # Run before any test write.csv(mtcars, "mtcars.csv") # Run after all tests withr::defer(unlink("mtcars.csv"), teardown_env()) ``` Setup code is typically best used to create external resources that are needed by many tests. It's best kept to a minimum because you will have to manually run it before interactively debugging tests. ## Other challenges A collection of miscellaneous problems that don't fit elsewhere: - There are a few base functions that are hard to test because they depend on state that you can't control. One such example is `interactive()`: there's no way to write a test fixture that allows you to pretend that interactive is either `TRUE` or `FALSE`. So we now usually use `rlang::is_interactive()`, which can be controlled by the `rlang_interactive` option. - If you're using a test fixture in a function, be careful about what you return. 
For example, if you write a function that does `dir <- create_local_package()`, you shouldn't return `dir`, because after the function returns, the directory will no longer exist. testthat/vignettes/parallel.Rmd0000644000176200001440000002061115056337671016377 0ustar liggesusers--- title: "Running tests in parallel" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Running tests in parallel} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} editor_options: markdown: wrap: sentence --- ```{r setup, include = FALSE} library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") ``` ## Setup To enable parallel testing, you must first be using the 3rd edition[^1]. Then add the following line to the `DESCRIPTION`: [^1]: See `vignette("third-edition")` for details. ``` Config/testthat/parallel: true ``` If needed (for example, for debugging) you can temporarily suppress parallel testing with `Sys.setenv(TESTTHAT_PARALLEL = "false")`. By default, testthat will use `getOption("Ncpus", 2)` cores. To increase that value for your development machine we recommend setting `TESTTHAT_CPUS` in your `.Renviron`. The easiest way to do that is call `usethis::edit_r_environ()` and then add something like the following: ``` TESTTHAT_CPUS=4 ``` Tests are run in alphabetical order by default, but you can often improve performance by starting the slowest tests first. Specify these tests by supplying a comma separated list of glob patterns[^2] to the `Config/testthat/start-first` field in your `DESCRIPTION`, e.g.: [^2]: See `?utils::glob2rx` for details ``` Config/testthat/start-first: watcher, parallel* ``` ## Basic operation Each worker begins by loading testthat and the package being tested. It then runs any setup files (so if you have existing setup files you'll need to make sure they work when executed in parallel). testthat runs test *files* in parallel. 
Once the worker pool is initialized, testthat then starts sending test files to workers, by default in alphabetical order: as soon as a subprocess has finished, it receives another file, until all files are done. This means that state is persisted across test files: options are *not* reset, loaded packages are *not* unloaded, the global environment is *not* cleared, etc. You are responsible for making sure each file leaves the world as it finds it. ## Common problems - If tests fail stochastically (i.e. they sometimes work and sometimes fail) you may have accidentally introduced a dependency between your test files. This sort of dependency is hard to track down due to the random nature, and you'll need to check all tests to make sure that they're not accidentally changing global state. `set_state_inspector()` will make this easier. - If you use [packaged scope test fixtures](https://testthat.r-lib.org/articles/test-fixtures.html#package), you'll need to review them to make sure that they work in parallel. For example, if you were previously creating a temporary database in the test directory, you'd need to instead create it in the session temporary directory so that each process gets its own independent version. ## Performance There is some overhead associated with running tests in parallel: - Startup cost is linear in the number of subprocesses, because we need to create them in a loop. This is about 50ms on my laptop. Each subprocess needs to load testthat and the tested package, this happens in parallel, and we cannot do too much about it. - Clean up time is again linear in the number of subprocesses, and it about 80ms per subprocess on my laptop. - It seems that sending a message (i.e. a passing or failing expectation) is about 2ms currently. This is the total cost that includes sending the message, receiving it, and replying it to a non-parallel reporter. 
This overhead generally means that if you have many test files that take a short amount of time, you're unlikely to see a huge benefit by using parallel tests. For example, testthat itself takes about 10s to run tests in serial, and 8s to run the tests in parallel. ## Reporters ### Default reporters See `default_reporter()` for how testthat selects the default reporter for `devtools::test()` and `testthat::test_local()`. In short, testthat selects `ProgressReporter` for non-parallel and `ParallelProgressReporter` for parallel tests by default. (Other testthat test functions, like `test_check()`, `test_file()` , etc. select different reporters by default.) ### Parallel support Most reporters support parallel tests. If a reporter is passed to `devtools::test()`, `testthat::test_dir()`, etc. directly, and it does not support parallel tests, then testthat runs the test files sequentially. Currently the following reporters *don't* support parallel tests: - `DebugReporter`, because it is not currently possible to debug subprocesses. - `JunitReporter`, because this reporter records timing information for each test block, and this is currently only available for reporters that support multiple active test files. (See "Writing parallel reporters" below.) - `LocationReporter` because testthat currently does not include location information for successful tests when running in parallel, to minimize messaging between the processes. - `StopReporter`, as this is a reporter that testthat uses for interactive `expect_that()` calls. The other built-in reporters all support parallel tests, with some subtle differences: - Reporters that stop after a certain number of failures can only stop at the end of a test file. - Reporters report all information about a file at once, unless they support *parallel updates*. E.g. `ProgressReporter` does not update its display until a test file is complete. - The standard output and standard error, i.e. `print()`, `cat()`, `message()`, etc. 
output from the test files are lost currently. If you want to use `cat()` or `message()` for print-debugging test cases, then the best is to temporarily run tests sequentially, by changing the `Config` entry in `DESCRIPTION` or setting `Sys.setenv(TESTTHAT_PARALLEL = "false")`. ### Writing parallel reporters To support parallel tests, a reporter must be able to function when the test files run in a subprocess. For example `DebugReporter` does not support parallel tests, because it requires direct interaction with the frames in the subprocess. When running in parallel, testthat does not provide location information (source references) for test successes. To support parallel tests, a reporter must set `self$capabilities$parallel_support` to `TRUE` in its `initialize()` method: ``` r ... initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE ... } ... ``` When running in parallel, testthat runs the reporter in the main process, and relays information between the reporter and the test code transparently. (Currently the reporter does not even know that the tests are running in parallel.) If a reporter does not support parallel updates (see below), then testthat internally caches all calls to the reporter methods from subprocesses, until a test file is complete. This is because these reporters are not prepared for running multiple test files concurrently. Once a test file is complete, testthat calls the reporter's `$start_file()` method, relays all `$start_test()` , `$end_test()`, `$add_result()`, etc. calls in the order they came in from the subprocess, and calls `$end_file()` . ### Parallel updates The `ParallelProgressReporter` supports parallel updates. This means that once a message from a subprocess comes in, the reporter is updated immediately. For this to work, a reporter must be able to handle multiple test files concurrently. 
A reporter declares parallel update support by setting `self$capabilities$parallel_updates` to `TRUE`: ``` r ... initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE self$capabilities$parallel_updates <- TRUE ... } ... ``` For these reporters, testthat does not cache the messages from the subprocesses. Instead, when a message comes in: - It calls the `$start_file()` method, letting the reporter know which file the following calls apply to. This means that the reporter can receive multiple `$start_file()` calls for the same file. - Then relays the message from the subprocess, calling the appropriate `$start_test()` , `$add_result()`, etc. method. testthat also calls the new `$update()` method of the reporter regularly, even if it does not receive any messages from the subprocess. (Currently aims to do this every 100ms, but there are no guarantees.) The `$update()` method may implement a spinner to let the user know that the tests are running. testthat/vignettes/challenging-tests.Rmd0000644000176200001440000001776315072252215020220 0ustar liggesusers--- title: "Testing challenging functions" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Testing challenging functions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r} #| include: false library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") # Pretend we're testing testthat so we can use mocking Sys.setenv(TESTTHAT_PKG = "testthat") ``` This vignette is a quick reference guide for testing challenging functions. It's organized by problem type rather than technique, so you can quickly skim the whole vignette, spot the problem you're facing, and then learn more about useful tools for solving it. 
In it, you'll learn how to overcome the following challenges: * Functions with implicit inputs, like options and environment variables. * Random number generators. * Tests that can't be run in some environments. * Testing web APIs. * Testing graphical output. * User interaction. * User-facing text. * Repeated code. ## Options and environment variables If your function depends on options or environment variables, first try refactoring the function to make the [inputs explicit](https://design.tidyverse.org/inputs-explicit.html). If that's not possible, use functions like `withr::local_options()` or `withr::local_envvar()` to temporarily change options and environment values within a test. Learn more in `vignette("test-fixtures")`. ## Random numbers What happens if you want to test a function that relies on randomness in some way? If you're writing a random number generator, you probably want to generate a large quantity of random numbers and then apply some statistical test. But what if your function just happens to use a little bit of pre-existing randomness? How do you make your tests repeatable and reproducible? Under the hood, random number generators generate different numbers because they update a special `.Random.seed` variable stored in the global environment. You can temporarily set this seed to a known value to make your random numbers reproducible with `withr::local_seed()`, making random numbers a special case of test fixtures (`vignette("test-fixtures")`). Here's a simple example showing how you might test the basic operation of a function that rolls a die: ```{r} #| label: random-local-seed dice <- function() { sample(6, 1) } test_that("dice returns different numbers", { withr::local_seed(1234) expect_equal(dice(), 4) expect_equal(dice(), 2) expect_equal(dice(), 6) }) ``` Alternatively, you might want to mock (`vignette("mocking")`) the function to eliminate randomness. 
```{r} #| label: random-mock roll_three <- function() { sum(dice(), dice(), dice()) } test_that("three dice adds values of individual calls", { local_mocked_bindings(dice = mock_output_sequence(1, 2, 3)) expect_equal(roll_three(), 6) }) ``` When should you set the seed and when should you use mocking? As a general rule of thumb, set the seed when you want to test the actual random behavior, and use mocking when you want to test the logic that uses the random results. ## Some tests can't be run in some circumstances You can skip a test without it passing or failing if you can't or don't want to run it (e.g., it's OS dependent, it only works interactively, or it shouldn't be tested on CRAN). Learn more in `vignette("skipping")`. ## HTTP requests If you're trying to test functions that rely on HTTP requests, we recommend using {vcr} or {httptest2}. These packages both allow you to interactively record HTTP responses and then later replay them in tests. This is a specialized type of mocking (`vignette("mocking")`) that works with {httr} and {httr2} to isolates your tests from failures in the underlying API. If your package is going to CRAN, you **must** either use one of these packages or use `skip_on_cran()` for all internet-facing tests. Otherwise, you are at high risk of failing `R CMD check` if the underlying API is temporarily down. This sort of failure causes extra work for the CRAN maintainers and extra hassle for you. ## Graphics The only type of testing you can use for graphics is snapshot testing (`vignette("snapshotting")`) via `expect_snapshot_file()`. Graphical snapshot testing is surprisingly challenging because you need pixel-perfect rendering across multiple versions of multiple operating systems, and this is hard, mostly due to imperceptible differences in font rendering. Fortunately we've needed to overcome these challenges in order to test {ggplot2}, and you can benefit from our experience by using {vdiffr} when testing graphical output. 
## User interaction If you're testing a function that relies on user feedback (e.g. from `readline()`, `utils::menu()`, or `utils::askYesNo()`), you can use mocking (`vignette("mocking")`) to return fixed values within the test. For example, imagine that you've written the following function that asks the user if they want to continue: ```{r} #| label: continue continue <- function(prompt) { cat(prompt, "\n", sep = "") repeat { val <- readline("Do you want to continue? (y/n) ") if (val %in% c("y", "n")) { return(val == "y") } cat("! You must enter y or n\n") } } readline <- NULL ``` You could test its behavior by mocking `readline()` and using a snapshot test: ```{r} #| label: mock-readline test_that("user must respond y or n", { mock_readline <- local({ i <- 0 function(prompt) { i <<- i + 1 cat(prompt) val <- if (i == 1) "x" else "y" cat(val, "\n", sep = "") val } }) local_mocked_bindings(readline = mock_readline) expect_snapshot(val <- continue("This is dangerous")) expect_true(val) }) ``` If you don't care about reproducing the output of `continue()` and just want to recreate its return value, you can use `mock_output_sequence()`. This creates a function that returns the input supplied to `mock_output_sequence()` in sequence: the first input at the first call, the second input at the second call, etc. The following code shows how it works and how you might use it to test `readline()`: ```{r} f <- mock_output_sequence(1, 12, 123) f() f() f() ``` And ```{r} test_that("user must respond y or n", { local_mocked_bindings(readline = mock_output_sequence("x", "y")) expect_true(continue("This is dangerous")) }) ``` If you were testing the behavior of some function that uses `continue()`, you might want to mock `continue()` instead of `readline()`. For example, the function below requires user confirmation before overwriting an existing file. 
In order to focus our tests on the behavior of just this function, we mock `continue()` to return either `TRUE` or `FALSE` without any user messaging. ```{r} #| label: mock-continue save_file <- function(path, data) { if (file.exists(path)) { if (!continue("`path` already exists")) { stop("Failed to continue") } } writeLines(data, path) } test_that("save_file() requires confirmation to overwrite file", { path <- withr::local_tempfile(lines = letters) local_mocked_bindings(continue = function(...) TRUE) save_file(path, "a") expect_equal(readLines(path), "a") local_mocked_bindings(continue = function(...) FALSE) expect_snapshot(save_file(path, "a"), error = TRUE) }) ``` ## User-facing text Errors, warnings, and other user-facing text should be tested to ensure they're both actionable and consistent across the package. Obviously, it's not possible to test this automatically, but you can use snapshots (`vignette("snapshotting")`) to ensure that user-facing messages are clearly shown in PRs and easily reviewed by another human. ## Repeated code If you find yourself repeating the same set of expectations again and again across your test suite, it may be a sign that you should design your own expectation. Learn how in `vignette("custom-expectation")`. testthat/vignettes/review-image.png0000644000176200001440000023026614164710003017216 0ustar liggesusersPNG  IHDRX;> iCCPICC ProfileHPS{o: -! %@A:JH %T ,+*`Y EYu`C^v{oޙs99)ιP"I @X. d$%000P\LŠѷFbU(tr y0j7'miR@F8sp(c4G}b([@ sLND㐣QvEbr(yܹ#< /BCfxQ!dh(sX,ŠZ=s#T,N5"( a̓Sƙ P͙9*7Yp8KƨreH٬qJ'*Uv_ K|QqeFLUv"FU@87D{/8ra\wD1k",IU_<U$9*AN.ˏU핣hfqãDP ȁpA1#ͰJHEB98#9Nf8O{$G%D2a[Y qRyb~#i'l6IB?fÌ` hC8|@ ād0䢕rPJzT`7gEpw# zK0ރaBz d C.򃂡H(JҠLH )EJ**]P5t:.C @a&4LGq,8΃ Bx-\W,|w/! j1E&F$"Kb Bjf t"' C00L&a``*01 [. 
5cl6;[-c/``{q8gąqY5:\ ׍zx{/> E3GB!Ą2iMB/aI$z|:b3:8L"Y|Iq,rR9tVMMLKmHmZaKj]jd;2JVג[o)%BSR)(O)թuRJ4,e4k h54ٚ\%5iiQrhкէ׶kj>MET6G]IC@h4-VB;Dk h$ש9IGVt=~~y$$Փj'ݜA@7@W[[{GC/X/[o^} y;/ | xG †v1 w^3226 5m5:g4`L702d|ڸ߄jg"2drCb0a ]fff+̞̙[-L,Y,xhIdZ -XY~JZehgkͱ.~lCɳmefna۹ *"&'WM@v`9;8t9#W86:b1%eʆ)mS9;8qz¹ ϥ+5ukk7{7Ti[ݿzxzH=j==-<y{x˽zsoT=S}||;~i~?uslYYWNlobvKT\4,$3&d0=tahK6,"lC=ǩ {/?Axi)lO q@'jcԓh3p3gTx(-;'@uqm  K;$-N,JnJ$Mv~^}j[W+_)q*)+?\}Ǻqn߰T{㴍 7'O;izΩuI O+j <:ѹs8~!¥!ϵ\tWWz\m~_={^oucj7 u6;t܍{^}r~pѲO4=5|Zu=}7o_z SVb拞Ekͫ``kk5o{u(z?}sy__ʿ~m2WpQA}| !H3Qƾ F 'GZTFjEqvuU,e,.ʷF*U*+_>%olcc@ߏ )c3_z#?*eXIfMM*JR(iZXASCIIScreenshot,Ռ pHYs%%IR$tiTXtXML:com.adobe.xmp Screenshot 1384 964 1 2 @IDATx%E]`=Iu sؿ+5k3LR\@$#ArN3C;էuޛ twoU:N}ChBc 4h,X@c 4h,X@c 4h,X@c 4h,X@c cJfbnU& tut`i= <"k=ձPRiߔLWGN&աKq|) u9,M'C:|SkCK+5a]b4:7h,X=65'8¬$xVi \4mub0x{Zi7\8M[ǹmcL/k}jOZDZyt*ŪrצFVۡsC͹k Zj5m7d4rFͫdṪJưZS|G  4h,Xhm`c4mvhN7~`TF]%]?:z=3~=Ot=O[yUq} ק*x@3T:ҽb/˧^,mزeo naSק-h?FYlebh(>6zɨSn:ۧ]7ϳZ-3֪Ќc8<5w8|2Uk@ 4h,Xqo[CG7\a~MP1<-֧{j92`oq/xrc[lp N\0:bB*ÆJ r|S:V it1iy )] -e0p-Un|,6| nq/xr!TFZq} )] -a> <1\{{ b[ nSuy1!Ka15L^q]ٞO]gV(X\<~hPmBc 4x"X uר~p}; +ݯ ȬK3xi߭|y]]RM@c M 5 W*uqy9C}צ0Yhz1F6qc 4h,n~nʼĺxF/ubufץ3MgrOu3|uc~/'mtU_U^Egi܋_w>5R j#n\Y rk^)&X@cO롥կNUypӡ}^C?Vuaf 1r.űy&X@c xï^N<*:pég3rr|Rr+Oi>9^x茾 4h,Xh_|W[4479Y|rQ-N 9^* ]p9XJХ8|;hrH)K$];OuJ9l4tpr5I򮓯ch锃`uR\] *5I㝃uO׳nNz呗T)!ӗ#ՁuVF=)Lj)^Pߞ7pre[<t3sz]NGӟ~JToҹņIme its3|=BɱdX\E˛0Zl6 n>6Q^|H/}y 7oR:q].#I=OS4r)^|mSX@cK u+M+tu+C^ZnyN),nʉxH:uxp b*iޗUS!8*6: fiU{\oti}|ΧS4q}:ѥIU{:NҼ[F'W٢.^J|:KקSn;]U{eiyK⑷e,4y:H|R~iL] fyNq2O,o x$C*.X nwDzMnڠF^vMkmje)M,6>+ݫ7XϗY3X_7l_ r/>U>t.̪cX/Ŕ{:kOˍ}pr5Igr)^UIt~{áKey^`yO?Vt&7M7bBjdm8t}pr0dpr5IgAL0cw?t& >mRr7=>il&&놓+zUL V[ţ7h,X@c5e*s&|J++2mi=,MU0>x3=\'}<|.m14ChWli,Ki 4h,XHZ-NuI>KwYY.6-mq/Z,uhs8-U n1t،oi,OxU=.8qH[0gp8![3#^H5LgtVhixk+o]nk *o]jo;P?1RxUlZtR不ޖi:!#orbg}.o0u`)4o4-V4]'&zNCO gṴ- KOZ>k֬l뮻nYzMZMÇЍGZ8rF8]^t9zԥKRrtV7˯ :iM_֋r-zyܔ*yFgVљ,pʫ)4ߍ,6Z󖶸rYx󲠭|fۥrsyņC|WQNW?tQBKeAY7o:Ud6?Cti31k.L;?Iq0KSfi=h`{\M{uvt˕y-Fs10oKϜ9sӗqo֍]b^m'F"TGCj 
ur:蚶s$usy1?]- <9,tJ']J]Bdcmeumܸ1 dmU۩ 'TE=󮒢NC5顇j]ݴѐ'4I,9]1[hrp˲-r\-1snz(Ք)SジZ˷J>ȭ2>cp<YEi²C^PeQweQVgUiP ,{Qީ }lH-kYG#/uϡwWLκWQͶ+gw/Į#N(ewZz˫P cSޚw uokm Yhծ E:P*~giu"Jz1-eezNE1)gGy{әgwOn&QV}H|Y*o.Oqʏ;ws9gg=Y_b-w}a+WX ]S& ҰXtNLjB#dCL|E) ,xj^@ڮ bmRL\mtqlO4m1YNb1HqfsJ');t2?ܸɾe9tcb!=Nez%+@$3WpaiP" +k(JE(bE2-Hfi5P@O;{6hpw}g$kObs@<-mž,M|@V0V;Ri2Kb#m8Yx=^`ͽW/X`pG@BK%4"_8E-QȊ$J(-Ti |2rhA,`TŬ&e+[BD.eYqGW2:T;K]Dc 4X[hwxcrЙx놳&u5ZY'N8vٲe+M6nҤIo3e_)k=k{0*Rr/t;8idg zD`Vq-=,M|`G?V6;Ih9tG/*MZNCe%MnN)~y/,dWJꐭö( R~>FNeb/mV~FHMc#<:*3Bs_ @sZ ִh)rd ٨Ig8.aY;2邪&'Oe%w?VLA4rΪ{X0 fvBZK9Tvpre^gFV&YbEc'[R/ڭQaRsPG N]eC+j3m/_t)4| l\}*p羺bOݹ \խon%Q !LxIe+p5U{\X_aVN8(,c1"0!U˦!ý-6xUl,[}7];A^!2LMhUEl?KZ?i" e#w{pp0la}sco1] bD ^d@WKW\ƅ護n 6F `1F`j7ȓ*+R|rl+,&Ơ=#bb(VnoX &`H^M%h|`kOmBË`iǒwBк)ӕYRhӆ,{-+ƯKmUJRTjl1l!1cc5,E'D ߹88!K. n2ub?~M! ]e\?9̘9Cև)rSHhOx8֭hH.8D\ؽկo8Î;[<#b4'{K L@Fc&Y}$}3ŁCV>@-`-3ҏ +gyY7k=gfvS1|7+{8`~b1ŔY8X9PV^WND5u@ k\; \~X2\{WjŭGA[oz裏+ &rn = ,\/>ƹK/Tq -Jul-x\lpdäCFpj`\mL6Un'np8*[ۨ riA@6gl -vv|ڤn\ uYWJ(6-Y[RrRͺQ>^h1cI_n=٪}M 1mlцB*v]˥Ki}]x)pƠ=l.T[,\4qe>y$gL#R?89WuS8N;{Ikvnhߑi#wo8Cs H~7Y:ugLOW1oyT/*ゾ ;.cs1^7G3w`}9[/Ղ UW_Ν+G8Fp}_q]v>ğ8SO Xz^U/_| sxTbjELlN.h[02(OaJ8ԯrLqH{x. ,gP{5VVX ` /(:aVv:8t9gvtʕ|1rv``afmSqa`` 7Q6{HnYr{b g7" V/yKt'?ItTOvZхԾJIG Y KsJ:`9J:꣨%ido`"R 2],}[Ό#ڋ~95.p,f͜)Wűfyf'Omތns fعEpϻ_0`玧H#Lmvgp[) Y (W]`uE7v}qm޵MKW}3I͕9}:Ya]wQg8: 6W@i*h䖛8#ڙ'?YvfO~GvԆuAի)G̈́e ~_[3y3剰;\}B)&Β'F|gf8'+W.W88 YrC҅@#Xp| H &t\.MRnYBJ(dþR; 9~^6+']+]쌆cy8G ؑ?ޕ\]qf5`חҿW\qE$NI($\8L u8iSׄMrrI'm^җCda|K^KO,PΦB=[Ӧ\x[M\5Qs\5>,`|w33xfӴk nؔZjWnW[-x[m=d/rv$kђ%򉙨x<:M#<$My;.@€z,JDtNy8F)rcUя)Ñ}ӛE Ǟ>/ֳ/_җ½ޫboMv'l?#1N>)؍{p5ׄa=+;]9Xַ5s1NYS8#‡>Yn{ü;(8s _W \C926[o?݉K@?.VHq Cinmk (mM0noq/]h.P9OUoY"A}[PЦm/2/˻8`}OoAX!sKv\+u2=e":g%N[G5]\؞{0>`ux^WŋTW|At-Tଋ[$ig>9pg+2̙BԸE{ :c-Ne(zsip}x w 52馐7ȖiUŅSZ Pe nn5 M7DNJ]x_D裾ک\Ll&e/:ՉAd88B:CC@'f1bbWoX<^[Z8TYX8k»Нszk@n^[`HltOկR 䧬BĒ@ٹx+_N60Y"[I "y?VE&PwD*0W@iy*aS6<)远iS~M7?~8:-o~ :Rvv/7ӧwe]ٙzSVp^HI 2]n?.$P[pgMw}r~wy. 
g7/BՉ>Fy]nxz1[?_Cm^m6JDž%´yn ǟ2mŖdr塱vkۊ >\h -cAcA=&MW*ȫWi#?Dgb`nٝcg[%O)/k -M/cDQs]#^5qW]{xߦc1~}o$׿^]ΰUvL,9;|衇j펐 Ƴ7 vG/(~߫Ͱ#;ü7֭3>5M wyJ3#Awלo++,S{\(Z>0 TiXa$,SbS*)eV e25n#qmh,&ظ[^ft6z(Lèvpr-L|`U.gP(]ёA7vUa*b|INXV%q7jnҪDu,b,X̢bz|;m,>W|A$0gS5M`yLwqµr[x?ϐ[ݧ_+"8pU׿Ul8-ZeIKB,yѱkXt.oxi,$ɶ``q ;P\R8~:v|dL:y1&oAŇ}u:/bQҳ8MKg7z}y3tr,h i+)j:å CH^! :@<˷^ i ;p|̅'gO.@/"hA-Vҵr0=tguQx% l; qΝux1%m`\X<"14w.hР* J.D! ':ȃ<9B*ny/`ϐȌUdlva2.`#G[?{X-Ag56</hwI?S|k2?;_.d];mgq@O:#v_wMU|}B:ƶ M Nr|r۞ o;\8@v9#I_džI(!Ayp[.~[_ѳ 4< /{!@º' 9HxÒ?Řm$jhI3𹒝'O0Z0 IwpYs3y3Ȭ61bʾd¶3>=Uk4Щ]8 E+ o;vs#mGu7E=8>d,sS ٵIx2ÇuavmQX(z>'9u<5-$n N`ʼo(nGQG򔧨ģC_+)AtP ^U mӍ-DΑÙkh[nE@[? OH|k:B8'%# kC_@)W-ZvxDZ[tQBy"XI l#j@Nuq:AVC}rg[߾ŌUS'-ڳkJҖ7N)|x#84}`ZJu?h7bqn:j`C=#D`yK.I},'oiN}{;9DN`V 䢛\{9?)F@+IL9tTag00!mHC,b\ڔ/faӭJo8u;ڤM4UG g&x!Oao=W&e8h;CH yguN`<\vڃ`,m 4^o vdqP8`G1S󻏃p:aBv,p,Ҵwp O]!o)-}p}yN:O^r%rUgWO/OњÆ]yߴi\ΕW^N\9;E_g7'O.q6rox*[j*ߺ6+W̝tždpp[qr38C-(26VXŁ)33G?lnq'?qxի^N0;[OWl,s/5\!B󦒢ӊ&xqbqt[uygN Yi{O8v32NVRF;n'|t\V*NJ'?AvVĸ\4r2սҬ 1/J3*t-DeK&S!zW8KAԡ#.Aýޣht~:O t~:gy;rwEEh``` &Nd2Gؙu 8wu.r4]O;dI!ct G_!dOoQ/g|zTII#88~M.؝ /vp>S,<2qSv ^.4g'u' ^qˊK e |/ ]:ښ@13㐝> ԡ>f; \G87Svq@Е[XnC4\ -^s#q\mrc`cڈ(^љl0I/Yt4!yE6Zn5J_R`DY|tmCADP;?p22T1oY4WȮ7}h`p@ .Ѕ]2 /d6p ]1]Xv 5amٛcƬ,Fl,xl mE"7$+/W#>/&+85—ʦ:/r'C cvlC:p5'b]'./l!} |k"M7$e|"EYQVZ6ԁ;ܺ3͗K4>;;찣" 7tY9-Z)xzЭMGh'"s =FcOJc;ڙ'у;IvޘB]K+شbq H,z`kE~p;=bu* ܧ{#Z,N`I4Ld/е4^94Q4HV!,PP2/&7)l!*1$i-Q2Z:y?~\̗YO&BʃIc(<tŰMGml!l Eu] mRbc9@gDLJHZ"̍)rzu{ӭ m&ۀcQWh[*TΙqǸx3fÍ6o@\Ia8ESp=E/uqWm5 y: jniR'BΪ&m@Q&9l!AK[+Li[:{Vi fX9bxZi/ <ݎ~J"ay-Y>X8VFTe@pyN O1&E>[4=':5qoV Тl5BLF:S8j@8:InmSE v17ogA֩#@IJJمW <-T9۷]~ib.,. 
~xpBL JI,u~*>=ngsS /s2;wy(Œhޒ>W;j}HD:RD̳yW%0v+GŒxE9żp"펿|`Uq2@op҄ny_fHuJaC&Mhq,^8oFƫe?ІeuT'FP8 VٍcRM] KAg{Fg[ErH$oń`0Y~JJt)餄BĢAJ%]Q+P&v9;c[A5q.:8踌cKj+{VQ^aQ.SM=H]4HY"eRDPɅx꥟Im6wfpOR4NFB2ZnR/dS)\UnJbióp,KpAn\`fP\C֑/X6qDVqoEƁJ\ lTds>-뷮&+j=A(tXN*NRm:B?r,[tjB Lή| zemROH d|zbm\ .@npKU& tcǞ'v޻la둆e5)i;ltݹ:vir:2<>7˖`˓Ӥee~$,Sd*n0c4o𞱜gQ4?4ʛwX@c = CfҷTΩư>G*#+3>>VV⍄B54671211Ov]wUJ3&n,X@cּ#ع+ K.tfv ' gN/ gq]+uhMAb_niSض4ccġ#}k_;,=&X@c ca;묳?Z8Z4]*O459Ss@ YY׸ՕIRh4pn0S4g=_-1Mª  4h,XjbM/zS5K$`+/ϥH}#`Y%z 74̗ئ~;W>K@c 4]͒b=׵6TDS|+}QiO3J[2U*zClin  V8Y1x:t 4h,Xd/^1`ubfW/?aKhYG(纕G:zڮr3El J[clt-Zp<ҽ#wSpy[:FovMX@cǗh98Ӛ*%3 V iZS˽^mkm3dbZU 7|kU{o;X(evW!_jua8pHFp ^=mND  4h,0-ϼ=TmyyieRoq\;))8._SHX򔥕uOq SY`#I}=,K)南/aV;F=&X@c8V,ěniWq?;c=jIႻ ;3l4qla;lH{ٽ&OvxpN 3̔ۯ?#V!OQla>2ñZ[<pJwsz1Uh|Or9e` i3 ZvN\nf}6lӼ' č 4`ív0eʔ;{࠾]|W׵ ue`Fa]!Vne9>a K.]8u w>r{xÁo O:Y HK[YW)5{Ц4i+n'yO)8>SVi1r2\5@c z{ڊsׂ'=I3jv]cȎݴqıŠq>SV<[amFryO 01+| |aGux]=;'4Ŏ^Y)}lnire[4i;u.O2{|i04Wܗb?L:U֕AGk,X6_O6Mw{4m]Z~>J@UWI,+~Cw_:icqOeFƽhrG|ղ0F~yaᒅ$[{]bfySϧ8CS G)pJX]+\%R)iGlᱶhY]C=]Ǝ~A9p̗9RXZÿ6]uI[Wڧk '-۩iѕ"hI4h,X趀-ģ[F~,`V>\kѭ旾@!ý*7ё&7bH;X^qVA)np =8G0*Dh  4h,X:mt1M9:J^3di G4U>%␙>V qwK!Ń?t7Z+3zפ& 4h,-\Ɠ~Cf ԚVgaXyT=Q7*JY 3X3$pR^t"~6QZ^p#; G!'IRg2,-Odct7@c̟\gs\i\ڏY㖬X$NUј1``O{SQ9x(Ϲ UpO3]w5)S %]-xo^,Og;-Kq<1\)7qck#LU$3:_s5|gxk^^җo~aѢEZvy_W*[{)(yUs#Ǝ^ ᔹGXc';& ܉O,>O9Qg?IiHOے[pGr AÊ=YٔuYZõaɛ%ow޽p--O Xn^xum5E}7x=(Q7[oZtJݫp[榅Wؕ>~a =ո>Q:߶T{)U#}RxR}Yprt`HӠW\qE馛uxS b.S'bs_9IC?O#N:p-drVAc"C` n0kpv|u6?w/aĩJ_9w#?/̜:3̚A8qW}i&hrLhe|Un^O$ϢuS2^ rpS7ݍO̞,ȕՕف=-//| OSu\8Xܣʄo|E/z.8V{챇:*gq?G?]yqa}Wt2HI`}:8~G>u/xAxárv.ZV,_vo}xӛޤg'}rꫯ~zKÌ3   uo~3`W ;^z_ !׬q!| R0s>OY׹s??va#~|nwc;e+}ovp>)?$a8fsq‡zW8O9mo9"]}I+©}> [E-w-燏\F!>#w<6<;Mg} U[GUk'3l8G߭,o|3'׭,9X׭"ҹ7;bmOՑOT b) pۍvsxnq!}QG镐"׳@yn1 oPync g]WӄsXwt؊'3WZAOr:*E͟ph>0qDՇM}QOL,p[ۏ"jG{=T瑃`Q &%N*v``'1±ݸs3k 4X0'r8]|2td\|O;A1`^h1p\L>+En`R,#\Хٛν7׾(̝ޤcGZ'gيea)]ueaԧ+Uw_aYǩ&Hپz+~W3 oڟR9uJ^9+X]K晄P"ni%y騪tƷVLg`G>-}ݺcn ɱ bNjv`L( Th,0C9;P\%OZ_ju!P5d08qxpv6YqI&8_y zUXHsxq㏗]"N rHʩysZh3gxG:SzQg%],zᑥݷ3ևnT w,6X4jEִ² _v\p煋_yO[SWngkw_lsl`?g8'˖ 
v8jLp"-sh9K!6DDN;zâev! ٩7*yd3E͉2s]d":q&~hXڳ9Yeӻ*cӍ~ op^3_I?An4aOV[@:@BEÊoɁ@+GG b/[OCBnÁ n}uo!$$ &m„#Xƃ2(x u<^7I {m(b b+r&>!b_Ɔ=ZH_ /Pq_ 30>=Uu\ˑaW\i\;w|PTI*C'{{Ԝǻ#)P4y]dEx7wn_w֬sȱaw߁DK>fnN|o2^u d} \oW{}[3N]ٛ>vt .qwI5s;" OYwnMKYzXnOwO< %bxWt.~I2 7~u @(Q_"=$I7IyH4QXue8A4@& O$<$U'@6í*l|d?$Pl:߄XjeQK@zHQP  Dq𛓔۰De Q)cժUTU{?v4b@|#{Y?\!CK!`#\x=.H2\x E-<תkޡi"Iľ(p$$熎v7i]3U1M90D 9UWw֡7[> `ݰkN[҅[[6f.= k!OhT`9"2m mg77!:߈G"yjPK#Qm}߱2m(R'>fGsP&.+W= Qzu@bMHګ! k1kyu)ǵ_|6]OR5 A[C:Swdc7"v]3dƝ2EٌnlΗ"Vomծz}&d5sN \e_g'[%FU+HaYgP/3 l5(tzX ')vv eCmacza{~bzڀ Z|f_ e!y׌P:r\kmnf[^?'$&>GNJ~AEy*.!X#nNlr7r|X&5${ϫC=X!4 5&o]A]yLNs9n3e<~S?٪[F7x1b5z=Oϭ[&D<,jC`!z^6eMj%[Br1G?|kClgH e;-%X( u湝 gemՕnNmo:,C'’!` V_*F"\mguJS2$*\){#=,rltU~T%Td(H+ScTam!`#EqiL),p؁; _nd1R`1PY-F(8ߘr>*rnm!`c\Иo|`PN10 OXW: $S[f\ԓ[h#! 6*Y6@r#]S[`Uz  "{|:mzGa!`<FA]0O]שG%ω]kf7{[pu2f`vOU F zqR~L8cRG` 9۪JlX "O-x ƛ{u򧙪AX0 x2A ;Y~y3Xc}Vw1 \}+gL9AyzC)6%V u+̚M^⮻/Y=fbiw}7ԏV/grc qszsԫaL'A1~D oV<Ғ!`F/\4) d<?_  䗝x7BX52q [ƺtrpƢ$jcC839r}za'1ՐB'_#ޒI7Na8QtK!`Up21d!/H=:SI[nc Q;- a^[` @by^^^;#Eyri7SQș!`E 5C_X;㉵O tJ9 [Wޚ C0 C`" ?A 1S֯燐Ml+fTPQ[W3>p3!`!`<&EZ< *K 49U!om-7 C0 1W[֙#О'm8S"Ɯ "lrU@9W C0 C`# hk#dT˩-Igژiqx6"McUvQ]ژmhG6!핶Zclc|%GW淇u+/ԩ.^lr/sNz#'0n*qs 1gY}aA=YM;lKɼ1@@dZJ]>Ւ\NJ_4h-++{pl1FRO mԥw6J?5}/s+0Jwq<Ҟ Yz~Ƌ6U2B}!GMڟR]3O!DZMDQIX΋!3n$mgh4CSdVBL?ޘK@7|aY[ yifG=_v/m9:1;ꓪҎyfO˞ uz<lyX2_gdlJ*xiG 3Xe38y Yg* sϴ/3(N{a=k}0*2Tvau4o㕳XRyyf%ک<Ѭ_D_}ezhY&G]fSea~{h:T:ӆG}xOd|Q=fy>#}epvhLUvILÏe}7u}YُAn6bn}e}F˥%X  G`痩6֣\=0ccǎ~(&4 C0N ӧN]"_oё0c &mʱ>k"1lAFꞐӰ|re0 C0\A|N]ԟ5@1S~àNؖ'r'Gs C0't2e Vl08&ϜPu`yAlgh9y+rWk 7Y;%F䬪IʞΣҽ95 Ch- \< Q6XSz kv@IDATG6`o|e<"ĶX' V>ak6p^ArCwCֻoMvt˛ACձ#}s-O,kд C0F;u̟r#îQf$h d6?f(hyooovK!v$x6Pb*l'r2P}QmIJӉG3e:&uvKҏٕaKMLyzlWߔMB&t2Թoa-w@Or\N5:>IB1VxӱXf!p" X1 bI !{L05k[v`9rD}L:׿C{7d$p<7QB]~ܹ/CB_\5۷oíH%S / l<݂ t hC? 
{јΝjǶoA~vnۿBʎ3WU?}[C܁7U'[~VbX^N%׿wC|ut*Vkl,EkԿ,HyalY_Vz랧>ídzr*-GeunuZaiBꧠ @A."Uܦ$S-UY2h"tR]BPoag>o]q7Mw뮻}k_Su bBWgӾ>:իV+W:HZ*3}cJЈ| ;:ұ >=Si1y6}Oh{=%HoV}~-$LB Q;'/1!Ey˞6hG<&O]vwv<|\x擄0ݛt#{܂z//~î[H]W|!`湶 C0N1S%s|ubDal5ќ*5V@HV @b,}P 5Aۺu;C)S(3  [~ >+Vhŭ3<=C_TqrC9nQ%{|"#xO#AGdk{ OX/Rn0 =&v}{4CVo#ouK~;vϸ9K*dj~=YN Rn~ ܤc_O܂35sɷAv%$p܆xes%C0 qd.l_ <_6*x%X(oEm d.~}s?rc_JW~~ 0ď X>?:kUp-}{~oXvޟ  %b 5=ً/+_ܜ!6'zUB$i}jR1/JM9mvvޥ=:|P|s]Y";RxU C0?2_4b.GPא, `t (#Qv\jhy $DCp[~7jvfr{z-Cp>,XPo bʕ+->яo˾+W]unoᛋwqFwJG>}×e%c[lqyk b S܎|߯籏 a\i „g_i6$4m2TW'b!Jdx@5WK4sY-_9;0!`A,!pY@/(v2_/:,3?P8: A`̨Wz+'R * #L10@X@ʞ񌧫n`aU U.ޞ^?x{U2[}\$L} =r]r%U- }b{^E93&_YF7գ7>6ٸL!a塟Hgg)B 2ې&|G6LM藺̳C!TzXB(?53V0 C8(J&8䵚2U4bJk2GY ^MCѾ3PkS(ysM<`a7KMv)) mC?xbŪ $ /Aǀد_!VquKuDmL?ƃIg" Ƿ\,!$Co)6_rnACNN…oګ+q [b!,jȴM 2iRZɋ*Z dX%(7(yC7a2ϰyctթLw _ RH 2e2YBeh$|=0_dʯJ~O{q 7 /P'&?%M?LyͱpQ ~>gñEdA@3%K&$4HkԶU` Q @Cc;lnBRsUh e)Yq]U2/2\z 9V7,OL6w3ndM C0727;pоP77j,7 9&:«RjFC&rC0 C8Oj8@bvߘFmj`8EɊ"kOA,Y+!`x5iFm+Rk4=[!`!09f8Rq`ꥂ֓<點!`)E ea1s\ex,z0 L^z$\-AѥrC0 C8@afn8Ǐ-Bv=jyYLVXlSGd s< *3GX,#gCyU'^{La}uXf{;z9}tC9)uFe:Ї˨;6l(rYucmzlG9v/mʴ/2e,ԣu&ڣ봣n[c9ڑbANO/-r$Sm3Q9依2 u 6lC>ZPԏEiXT)C_~88rn!`M!ū##G̙31igp^̩WGQdR@$f}AϦ̘9͛?O~V,7 C0 @ggۻga:t. 
y̷Gy:Em+6tgs kts} W$A+([5|qÌ<4ker3fY7Zsic.z'Hgm7% &4ȓgzu2ʈ=z`O`WjCE#5U2L/=m~9 \a;szf6Y(H#Om̪0Wi0VqS'0zNX`n'-X&\:b2I﷕*%XzovtX_8Pwww;:oGwXou'Y4͞=˝bE>1,fw"NAPJ07>r؆zaȾMFH?k#8nh`ɼ𡏷h!?E+XBF]%QA@'x}~-*R=>~ruLjyhU\ㄼp.r 5ıw@Vs~+}$Y=?&}c3nrYrA_y:1yT,Ms/'WX~jo6/Eet|{޻#){~+h/0aaSd4$Z8y^ C_B9Π,ƚɷLʘP=3~댩nT*dw7I&eӻU+{ܭۇ.!ZOX.\2x}fq w5[He * Q:^چ:R/e0er?w޽[ ulU~vo.\zzzT_Ov1 {W]t;XAm6xbw4| ȡ3k,7}]3c<"9ӌ/1&"J?l-¸u"IĘמHSTL|/U@qqe/g+bDȓ<`|++¥{pDV}U3܈0òzS@pd a:tHKQΜ~ ׾nΝ?_vx p<vo߻׻׼5o{aW߿}{s?4Rڢ2z衪m?Hs'fC+~;5|ov- ` "Oք͛箼*9n+`WfN n45%ܯ6^җ:|:>ܽ>S֫s7xø/yKܥ^6mW_S'~o rnwy["$b!P3}˿=[-lћ/[ߺ{rrvՂmQɭrE=n` buL&;wCBV +fMvM#,mP׺ݢi]*7C1ȘʧwGLOv%'< 7r^{ h1;w;Zs\ i;32NZoyĨ]-Y|e VfB>J^6>;v~o~wbO:#~jgើndF"\.;On3~9nLt^\={,_v3eHd4tM}{ۺu,[!8-ܢw<^&äa 3N {t|11'=)mw!;B]*=jj~hanrAf*zIvlKb!:OòG߶׭?pĽ',ͻNu g1#?鞼jO^y4~B6 f;ݏ?uu@;o?2 SmFguGs=1B_Rυ9s(A;y.lwqz{qϞ=E/z6ù=yۼyҗ~ӟ1&}cJ{kQ]BRH.!?+'{!iX6}׻#[s׷wk/禊tY9ă>vuB m?jK5pmJt{_$ s~onu<Γ_b{t]={=¹nєN(ݶ- |uÊ'> ]ZXWZц~s= }(lDZլO~]zR{u/r%{ֳȿۿW9Md %C65Chy/Gmޔ`yGLmMUd`!?rU`%+'T"q^h$aGcm70|EM626ҽB0F@V&jyjZ4)jz=+j{Ws P\{6fB$ ҥK ż]ͳ?yw/$<2@&,.9zPV bI:"L?=٣", {\g ? +L#i`?Y0rA2deBEoWу`Ҿ~q!d+i8nRN#q2c4qVp^밒C:VcIh /؝wy__VGOe9nڵz|t]tG|H =rƊ /t^v=~@ƋvwCY2 Sm۲=(T阜ZEm)^=:*ju^4$tOH^FZqDBEW҅OXp_E -V`h&]#'eq=[YՕ߰oIA1i xJ4{h01c`>m6?bO0\}$b!(Y0=4wW/vee _`A~PG6๋ [d䦁nLa=Yi:w47Gn,91ʊov 6K?GCJ|y^܌JApn"a_{^]u +֭SM:-/~Zs%-X|/\|>:B;l͜.Z]qQA+? 0\R7nau .p7p[jD߮陜6*!X92mypnOz5{u]eZNjiR+h $ %!O<_Ыҕ-yz |;u.H X ߸;8?ϐvFYX]r wÿG̑77 ?y~T&QjpI $) =nuXa/&EwIB/K__PE&3L@RaCu' l67q=In HZu9Vo?}>wlx` -;"<,Kۄuҋ7]D>Y~f`W@p+q}k렻Cҕ3d5;mt=p]f{j] [Zގ?XYGz ^^W)ƪ2nanḮZJ! <됃!?_}h)P<*|+V+VmBv:7eq Dcŷg+y'l٢;X2 SG.x̥6Ѝ}~r\X3ʙC2_arGcW, C_le|i].=e~SJt_BӄiC#nJ޾}ܖ]YBŠ &.\!YGg_ǞB2 "' toV[+{%5YZ*$xM'xـXVv:G䉇=I@s#K?8V~aq~hlwP!YDm҅UMb"s~q<٧s^KZq+|]z.OH<{Ac.!]{OiCˍ7F'?RdɇޞnYIO礸\. 
wrWy睝ebDIQ<|b@|Q_"( ~4%K60Hf|X ; m:ub, ڥPOufS^pQEC?A =V@V>!<`A[,u녿0?f_&FL0H$"KY'jS9|'$$P9B/n&8&eX Jr{d?|^VMXt7UH|FBO+ո[b+cIK\h-  &r;1z=^W| 3ÆurX|k^\˽xKƘdlJ^L o22t}[ȚM8 *f!`ބx[)my嘟<]]ml88Ww'x=A:ɋvLI(LQDk.$ײ䣝0Y!a~ܗĒdBЗs6U[њ9pRf|+A&SP2 lR<|[QhIn3,%դ:xۡDrrh+'+r2 t/n'u9fbRתsZ2:$B.˾ jr$ڃF~1 Uo,"xP%i80Ȥ<446l Wm0>'ukV;&MժkV;.q>D:"  ڱߖˣ>|#%{X'CGkΖKc)R1ZbjmB=m"6#YW/8|Ffw=)iIX72^;m4Xor=5'sf|?itB^!C;2jLfpV%_&8*9ssdcnDF>`P#9ưrTIfE`sߍ/cj'wȹ_tČN}'(Qm;e(Iyca;?{ƛoKs_e ۨS// Xg^7C}+9'%}ry["{e,o1yRH(ᅺtoVe V廊^G~PqlҊ*e,L%/Zi㛆^ \ i%  }R1N~tB~na'6JӨtjj|c S+qU$[%U|(/3%XAn|9ezC.ŏٍ0G1lkQL&F[Gٟ4$؊,djNJEK)!*,9-[(/~ v $gչ8΋V6 ChmeH|7k;z2v-\ʔr𣏕3;N&+V%(B> ?D -U弁ryR 9%*:~=!X8I{BpQǙe=QKS Cz`4TLqfVHV BpSz'GL4pdeY0 !MreߴE]UXoM[ENF=H鼨P(T#IPPCĪ9'o%=_*PH!1X0 C`47[O~nIZ"߼-Bj0r~t!œ*W$ҁ&؃+&mJEgW7 !pk^ykVG򁢫w[칉ֲ+X  >`S?L|%XTzC҄G@Os'ZJ#ix!`lŗ<uiÜ)/%Xҝz1^sVD L=,+.YO .{ƙc2C0ڋ\5|YCBp\ycv~9/;{ 4i]'dW44syrR%/F*/ֱVC0 A9sVرQL?FbbI &E*Od)£ m' NԐZZ:Od1^ !`@A콊fxMcFAunetv-d0-gPף)LR^C;5O-#ἰ1@s;4*N C˘ e1YYUcXJ"(0w5PwU/.pTY'u7@ ; pndYK6!!0Hlω9*7W,1{l\lmZ/.+P̣&(Ɋ5j7+]N߱ki)E2;0 6mrŞy3~sR΂^}~90VC_TꐃPWyME@NEN[Xl`!`x[U^x 'wpj*aX)Kb~9)VMOۂ1Dkob-=zB O CG!䵑[p hhrY1/1鯬-nJWQ5NPYV6 Ch'O022u]C}!ge VQG~r,Via+^^188~|r[՞ C0D ܑ^\ʶ,*ml,z6y!Rز(/(Xi0 C9,t%vڲq@Z5*&GP(!`'.=]cʗԭ~sr[':iw`G rQÕ+*ј笌v!p1qF722"f 9m𜔙3ge˖5֕~7<rJ!`'":sZLƶz9#X[vӫ?`^%cU} :*cO1nnKnb[auȑ F6M38Y !`nA839AR}za1ՐBkնd65ȼV55Duwwɓ'+b X7 דd g!`mz(QffaN6/X͙3Gm| C C0ڀlQ/26̛S>FbF(oEm1_rid6- @ℍopnm޼ynծ3#Y .TR,K!`'\[Kc%h$H_9͉x4gmݟc n۶MUOOO ޽[W+Nt!LrE 0 CA@:K5'&C{ܷEluRDf FH,+Da \ 9wu!]ѱ!` 6͸b3܁~{je a{ <<9}Զ SÇ0!(.(',Qn!`C@ %1Sj9@9hbvHR&&c5,2C1}0X-˜l+XU SOC?:V6 Csds|dW}7e1YYUcXG5[A>Zqg$XɱвOf̘7[c"gϞ=a!`#s8g ۋ8R΢=ąŤ$Zaՙg i;zmB$߇ !`G?,MeiGC$)4V~9.j9 ɬ : :ٻwnXǪ6Vϟ={(g[͛7oTc4熀!`!ford.rʙ%XUk_+KAm{6iZ ZGΝ; Hg[ J^$*e!`@@loІu+0v7\n!ț'Π_f{QN_+ҵQC 9 wv;v=V-A27e VYIzcyz>b~M  Rx";S,en0 C\#I>_`߭_OpFrYnGl˜>2;Qh'ABO22.\Wf C0 ȜH:.aרmXYU4Fht~5}Vn*> 9dL .t]h d a@eY2 C0E gn$U"ۊEVI--K , R}!HQd‡]A=4D$ idxs|=#lkde Va L?*iWH8Hn!`!09>g>KQ`rg%y"bYȪZc!U,نۑ`G !`!FIvI/kcWe XVJEmJ5}t歹8>i 
M8mAj(!`)CsS5AϨ+7ioXVPC_iӶ~o>`wd`|7}t] ֺutC;҅o Ο??O0GGg?m'ǎc!`/dy?|kԶwЙ|%XߚbQ5e sM*VbPSNUyzßi0!`!pșC(}PO6l+U/K sjtҕ+< %y7v?rcG-$Y vZ7008)SAg0 C8| B 615e V'@}bzF"')ƒCׯ_T VHNܹsUF}kOAɍNۜd vܩO WE6:-!`c tn961Y;V h{[=`6'm|„2dho d/_neC0 C`, "s2.G;V@A !"& J)0 C`^Cy|>Bcޟ+ ,It'SR^­Yf*J]TH؇5{v!yLn!`8˼?@ bh&OARy<$E{q^HV Z|[byFp2.ZH_8ǗY0 C8BW(ަg,|'1j0(dKըUZ=b &-`-դT3DMh.@D((lWsN׎;Xk;~<+X'xjmfH`܎ēZepPs%}C~[[wxNTo^/߹-(Bē)D HDq P}{p2w[*Ds-Z]9Y<(CO_}Zy LҠ,/;/;%$@"VS7?~'~~j/~^?tWT0}$@"$aχ4Ĩ7Q5Ecn)5{ Gd(w{ۻO~I-?{կ~/ |~տ+(}PɒMA}hȘB"$@"Oyr9y[[܈9С72tҠ勒?D Hg{9m.XK%QQ z![J JbȂOt)ɖ~gp"$@"X(gљsN>cG}5Wϛu}#XA>(<3V w'u]}/<$MD H:[;ϙJEߣhcV耵dȆpsМ~9 >@}*?G_׿^*f}O4x s֝<@"$ rp\[Ck5>w"ƖgĿ`1w&Ր˿ߨ΋(+__}tJ_*++G#D Hgk'3DSy[z^JGkКU| ^|~u?m};aɫAJ yQ&@"$OD80sX q-\k'wbhiQds^r/|0"_݃#ҥ/qixO?sm|c;CN$@"9?{:7a$׽ӫ'Y9\I]"$@"2z]ǨZ!K{ocRP+ Z9\PI]"$@"p@_>hε2kגx`Z:j3xL$@"dim@Q% I|=Ys2͠׷D HD x0Xժsfs|w5:`J 4WEŝ j4>Cgr"$2`y刾=4rb~tZkd[~+OD H'@e?gv|8F+I؈7)ט{1$!:iKD"0՛zs:jٖn1`F- 1 m:DI#PLosn.H=Ngcstu:`] |k/'),to87$B`)B}`?ߨz;Vsy6`yrI0Tg\Zuߑ/`[˸x&|(:c\RJDV]ߢMu=~<}N=64kVroW:._2pPDQOJ#>py"$CF;޷葸XK[@ZsZ`ociv+ጼ(]= G„*D)w$@V~3O5K U=}q|'\"$rw3Zͣ1yw0cI5R⫸\i0~:!)W"j [Ҋn>O>͛2>aAS<21=q<!$r>5Vwo-|ؠOͣ'hfִTYO]<P,}K_?^f}D\@}_v$cGA_5kPx.Gr->eP+z^ɵ O4H}_w}o޽y Z/'ɔay6[O`MXOIM%D`p?=Z̚mltCҒ0k6يk<\Dk*:}3N4H o?{?y٫Op%S^uҋIXTU|t[OrՐ+HDz\7Sc$5z8Ѥ@"sGތ۳۽{z[Ha%5,dJs\l0vnn>\\uc ,|2rӉ@"=ENq!h$HZ cCKrɷĔïY>TѧYvXr9,/Ɇc.ê\gRgɲdD x-,WjC[m9m&@|`Rv6՚M[1Z'$?NuWEw4@uzԓ|xGy@QscT46D H{@_L_K,,Oo\#YNG:YkSq_z4BȵCxKBs^q=>K]9ҙ11N iGZG9vWϳKm鰉boS-5w-qV>/S3)f#֟#C @"$v-[W,Dl<-ߐjUTVΣ&=X[z|_ޯdCryzr>d4x}V>諮V$+ژgͬȑɞ@{<1KA{_iy6.>jR Y=#ϕ$@"pzY9}TO=;~Q/t%Dž 耥Adf75+Q`> +kH =]UX#.f ykTve-Dy4%~G5O7]5ZTYS8dn[O[/Ca{9`DD HC`z;{TQSO'Tl:SŶ[X4Y|A$_S!6=>%7w^{]~Sy &A t4h6J7SkI>Y.~V-hwGYΎ[z(Lm~{6j_>TM6ix]tYV[iٚ%MD 8:˻i ͗$.ab̫Plk7%xԭuƿ,ݏ~Id> ZUU/lЩ\}z4u+ǩ&^s6J+oqrZ2O{k=RJD XC@~kXΫ%Mw#>V3xVYmkO|p/}>*&j*LCՔbR}&NOZ^DFh^5cyԘ=cA <;ziVn)ۼLÔ,YDUO. 
9G)Zј(ŬT>iQQEx7yf7 sR^r&~iPBXؼWtQVkHGVu[(K"E'<^:wıw}mWmQjhyȾFGntu Xڦ4Fؤb^V_VwMir:P5PIU&ݿ+E&(zU&\r{[_*΂w{e<Arsyr%|rS3Ӯt=u{C[\fvuE>RE8#G7~ԏ0#vW[##WWz2gzSf.;[Kfs»' j$g>'Z#tun/|7V>;/zs(G͡< PP)ytض(5ր䝪aV3VrtKf Rf*H}k}STti^}ҍ[<:(q^jRND 8E`IzxO?` yMjFcAX,7x@JF)ёu`ɮO,?,A^v5жW޵.}V?`YlKgdD Hv"posGJޣ-+iyŴ!ho׳K/5hS #t0lH×QM~SWkȍh>k S.wyk}Ptؒ&@" ,Q'Uotb40-g٘H.CN?F `Q}ov{K{+}5[/D H*֚Ր[ zܖ/vbOt$U9{󱁘U[1sV^'d'^OwR+T -mcԗl}-nO^8x+l߯hKm'@"\I$sdџ<-;-V5` yj .^3uJW(30{^=8Č <Lw]{=%$@"L>@07o X:kzZQ]wsTAkFVc[1i?bJ{׶R.M@"$W@ ~JJ?Av֐ԩKyRXIj Z͡Ç%/|9P:{gpmI@>ݟX'Ƥ$@"ˈp/ |dh|t|=NjmXƕWq_GTt継'aZ;aů٢o=ز{Wz\@"$c4=g90;, cqeF, "CGk/6ΆrеߔO7ohk䙈yߟɍm&@@ﹺe߹ _KXkC [-Y7}/| vs?UюJyK-Tv&7ۺ^tǿbj~Rj]%㦼bE\f. |35= )9jڃbʪ_ V/E?K,g}NrWjq*{.i/U=Mr8+:UOugQזkӵ>ɾԡvTZK\Ɇ]V^b%Z+ykPU9ɵbO,kksRp N&J}:+Tm{}GSUϕjP>5WE0)qC/[{dSYR"jRճ]sys\+><QSl5o  M=ץ?jKV{`խ^M[Ny ]g??tkVEko~7ڵY߰W2v7W{*D HDa ؗ9|G}-{c?b1w=:CvTF-p$$@"<K6;?~PQes!ڟ`T GTv_]wķ#D HDl@?ws^WO7|tؚb?Ә;D HD xP8bo&dU8IRxÿE=N_8ʕ$@"$2X9ngV@)@h 8|t{⛊D HD xӯ_V9MI9w:k*ljnt: 􎌏#)5r`cD HD xR|V~.hwNb]`菌=a?Py 1U>GJ!HD HG(j683NFboaUQfID HD !0x-$M-ŷfwˣVoyMD||Sޒ%EV\@"$@"d˃l3*QK6-{F8,)5M@c9hGuGG@"$@"4|Om 8kܣde1[wD HzU .eZsOHMCOQ]3B_z{.?ɩ?S+HD H-uҙ/~ޛS}v_f/>slƌX~mH0P%c?,rr$MD H@JIt֡KYfNpY5Z qŚ/1Gtt: 8p!\2.G?TNZaքC&@"$ ?糝O|жdj颏1$|Ov.*OOo>??[z7gRk"$@"<3%c:K-dӂN[z})z*&&@W?|;?7җNɢBr%@"$c@`ޔ߼__׀C"ŗ+<uReӤzvJJ??7~74 ??ޕz9OWq@KxM<|g  w~w~xgh:˚$kA'KW{6cHC'Wte4(9/ݻ3N~W4 &"+IDAT~~_oϼ7aq=,M :e+媺srimM*B*=;\"&ѷv#{?0%7q?=(k@O;X9Њߧ/ ܻ뿻r׊rBs{7xuy㚽ocգقᡜr%[&+2@ݕ///C9 }~PtO䋬A-Rٸ ;c%kM@&%.~R2Gv*>(2aeZ|',Ի+ٟɟkrQs‹b_ŭ,*?.ųdׂ/W}V]v7|oHskLn]ޗby븑y8NP:I8dt'Err.grZxzp4})o]W{6ZT:fMԇ@ҽ{(?mo~/~ͷ_џ;>zܜZ_~U/>U]1M=yT{+~2Y(:}lY| t,](:}geL{VU|ŧLq"9WnըnkD]?auo{zރЧ|/8ޣz?[w˾=|7>*[W:0Q.}ǽzS{~zf>couBEq{8zqItYb ,r&Yg͵q~w{Gjу:{8Y>T}X]zȩ^^tgwʿ)>u=HWd|9GWїʧV3M9Kk9Dqt蕮?7ܕz:_᯾%OO__k~p_W_3EﴸTNKprO_J׺$JTڥk=bzl<:h1-JSD?6t2EC;,TluЙ}Ea/CIUȱ5+^)}콨]G'x<7 {D HD>wy~18F| g B/Oʧan_T|-+y%;_E^G'1U!%.%'^9ੁ ^w _ԽSb-RS$_ώ~*zu9{u;}zn}{z_X:> T(щ%bܿRc%7-iNϘ^h,9QE}>R#Fv\QFP.]|\N7P 
xT>__1WjiWT+RE^28ԌؑE/>wbD$kgW9/kύsxrJwyN|btm^-EW1W1.;d]:%;//+lλ/<ȋ,Xe鴠-my~Uޫkaյq1x)tڴ"EWL.Ynb㒎O o+^+RUP$@"\9Q G2:Ql𢽋 kUGzQ-B]ydR /Zͩ8 ҬG?ɭK@',>EUuS_z_:)NT5b=RGK#;]_@"$%Cr=Ȣ~)VgtP.H!蠲QoO/NhwKXT(~ؔK|zzN/j?R.E»>}yOD HVnE٩72hbJ/ں&'uK04rA6)xHMz-rtIOH'jtE#_T^||!*Sk/\Z'izu9qV=jG!Z/ ?k8b J(bkqzGuN蓽C:/?-Źߊ9}Z8g\3Z9R?∑^kMOKt.wpqCC .R~I|o)zKEYPGJQ/YKTS%tUOv҄${|TJǥ#jWK}RND Hq.ȢRᝊ_|Z󣆨wxHE_j5_.5 ;UlLT`*lĹKSMxQN\_Kz#{OD H[}d׋GET6]E{_ׅyW :IמaܹBUoHD oZdKn]:GыG޿ hOO~}/-[η|Ot{$BACQls 6.4IK6GZFYz]rW4^EUuPgvG=vw}?s:k?5X޺c_=㭱u2Nht0E.i#<<:ȣQ"qx-t(?^cR3 @X|D\K>9E="b^:8][[ش%@"$p.OG'ʥ||t*qkH:GO q5*Z#f/Kk1M- YSNs^+ҨjPx ?Fl!M[/ĻnG^>ƩV|:G(&;x_?>ĺnWDz-?QGs9Gr_[[{ɀTxFs!{>t^~K"xJ']@Fx-tN_:VK *)1cz\CK6^==:ת}8jߪpբlɲtl{z11KVx[[zkvn9΍kы玸7CMՋ}@nQ׉]:{xt+&1OqYr,}+]y\e|C]ߢO,J]]CGT9-.O:hCW ^l֢>쭥n\b1zO_{q5^6tVdl[Tx-ՅrW,H.[(7Z`a ^d#S唟tg*EYGnV\D Hp*u t/Z>$NyCeXyAɺJK"a9AӒE?Tk/^z-;?YW|5 7D HDs6֗= Qt>kKzeG'1MEG,$ .Rqdbo-+ DuepBVl뢞ۊk]iE`/򥦩m.CHԹb\m1N2K~Z[q[z~zCFhçes_HcZkM^=|ZueC?h#G?cF*riyI3JMC\=m#\TE#@uVtZ7iW<A'WG/ïQlaHv.C(odK5,/IK*JCT6 /"FqÕt\`ТvEw|Kp֨"ߒ-][`"#VsbzqJNzC8r>ȹF[}c#m;hǧxkຈlޭ{p9Kaw8j.1;z]qok26Q]w YE=[kފ٥y < UN-]qm.EI:ۓыu'@"$ #ڐ{T{=^#;|K:C` 8dD2 W>"'ÑS"CQrO=i"$@"2T-:-mˏ^Z<:k|IKg5HD x6v.xtN#ߒχ 6yMGs+exщ֒[X+ȮIsй[G}ʉ@"$CF k Um-Oy|\'vt5> ; Eۚ,]y5n-hzK6@Ʈ++>ƹV:ߋ$kmQ{|O:8+"[}u8irD[;nV_mu4-y)4{̇Zz'(/|O:껯kՓz9E=o~QVr^ˑGYyZN'uYnG]sDGa+.dl#4Sw^[akQ׉"$M-#/nҹ1Wʉ@"$:w3 Y4\<6~JZGklZ䟤-I%Vl"UE9:r@c.N#ߒ"$k"$@"4 Q2k=z/㣟dH'Y@m{ʇ7"%qz}QfKkcOCԑh}yZrHMb(Vѯ%_/:1N:_k]+n6j퉋z/{㨥xdz_>,G]Ov}]<-hGto}g<P+R2Xkl=Hq`{[:!a>E޻pk]G}؃EOѣOeq#J.CE}\НEjU/\>w_(Mmջu} ^GnߣmktˆOqyo=}7}x޺{1{:{g{sه\RwqxѵrQs-;uFr|~MvvsKnyz>ߠV um.p־u5z9p^־u513{q[ʈǘ,4#>QA2ȗ}k|>:罦.RsFĜ`o+gۣĹy=[=q{bT{+g[q9qk|ul{Ĩ޼ԸE9<E{9{zr==q=zN:ˍ}ih}&(>1>]v.7^l۔}tt-YmO}+@snhOO-;~2NZ,[]o+ߖѼw(;Cҵ1׳te/[ƩCwc"oMV-tՋon긙g:QeuQnt}Fds#ƍX}bX}y,X޺ϼww89z>-}EYu.-sKL ZZV{_#:1'@"$ ֙mQ&wGY~-]/}io=X^ucݳ>ƽ$@"<8Duqc{k[:-c =~+~gӵg'Scwm-Fn}KՍ8'OD H; $ksk5᳋jX8Ζ}H|Cɑ4HD xn@8{>=cAc(рӹFG|ڈ|tF|3W"$@"L9:lm~o?qgѷ1L93'= )cD H-s<~hnͿ!`os^s$@"<54Ywx!h_{o\3,6gttsP^-G?r"_z1нq䄢g鉗] ٩xOV˾6xQQOl_ZuЉjy~=񉴕5GltZy?3 
H\ܸVmziy-;><.o۪'9Eۘ:6gwbqŜVbG+>ƴ|tį܋>MhL8#ωw__#L>HD xS%k$^?Wc€5/H[of1ex(>Q28]v"⺏#C.{JD H*5W8>QO7 ~7$@"<#rt~oQcwmxn=0={ZI/hC@n|Z8(\8^8~my?C+9qƼŧ=t1qh=os|$^8~qW"VzFEiFy[|˸Xէtiz~H^sKD HD xC߭4<=J>HD xNqN{1y,q`|=g~(_D HD HD HD HD HD HD HD HD HD7+\\yvVIENDB`testthat/vignettes/special-files.Rmd0000644000176200001440000001077215056336252017324 0ustar liggesusers--- title: "Special files" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Special files} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} #| include: false library(testthat) ``` This vignette describes the various special files that testthat understands: test, helper, setup/teardown, snapshot, and everything else. ## Test files These are the bread and butter of testthat. Test files live in `tests/testthat/`, start with either `test-` or `test_`, and end with `.r` or `.R`. We recommend organising your test files so that there's a one-to-one correspondence between the files in `R/` and the files in `tests/testthat/` so that (e.g.) `R/myfile.R` has a matching `tests/testthat/test-myfile.R`. This correspondence is maintained by functions like `usethis::use_r()` and `usethis::use_test()` and is taken advantage of by functions like `devtools::test_active_file()` and `devtools::test_coverage_active_file()`. Test files are executed in alphabetical order, but you should strive to avoid dependencies between test files. In principle, you should be able to be run your test files in any order or even at the same time. ## Helper files Helper files live in `tests/testthat/`, start with `helper`, and end with `.r` or `.R`. They are sourced by `devtools::load_all()` (so they're available interactively when developing your packages) and by `test_check()` and friends (so that they're available no matter how your tests are executed). 
Helper files are a useful place for functions that you've extracted from repeated code in your tests, whether that be test fixtures (`vignette("test-fixtures")`), custom expectations (`vignette("custom-expectation")`), or skip helpers (`vignette("skipping")`). ## Setup files Setup files live in `tests/testthat/`, start with `setup`, and end with `.r` or `.R`. Typically there is only one setup file which, by convention, is `tests/testthat/setup.R`. Setup files are sourced by `test_check()` and friends (so that they're available no matter how your tests are executed), but they are *not* sourced by `devtools::load_all()`. Setup files are good place to put truly global test setup that would be impractical to build into every single test and that might be tailored for test execution in non-interactive or remote environments. Examples: - Turning off behaviour aimed at an interactive user, such as messaging or writing to the clipboard. - Setting up a cache folder. If any of your setup should be reversed after test execution (i.e. it needs to be torn down), we recommend maintaining that teardown code alongside the setup code, in `setup.R`, because this makes it easier to ensure they stay in sync. The artificial environment `teardown_env()` exists as a magical handle to use in `withr::defer()` and `withr::local_*()`. A legacy approach (which still works, but is no longer recommended) is to put teardown code in `tests/testthat/teardown.R`. 
Here's a `setup.R` example from the reprex package, where we turn off clipboard and HTML preview functionality during testing: ```{r eval = FALSE} op <- options(reprex.clipboard = FALSE, reprex.html_preview = FALSE) withr::defer(options(op), teardown_env()) ``` Since we are just modifying options here, we can be even more concise and use the pre-built function `withr::local_options()` and pass `teardown_env()` as the `.local_envir`: ```{r eval = FALSE} withr::local_options( list(reprex.clipboard = FALSE, reprex.html_preview = FALSE), .local_envir = teardown_env() ) ``` ### Teardown files Teardown files live in `tests/testhat/`, start with `teardown` and end with `.r` or `.R`. They are executed after the tests are run, but we no longer recommend using them as it's easier to check that you clean up every mess that you make if you interleave setup and tear down code as described above. ## Snapshot files Snapshot files live in `tests/testthat/_snaps/`. Snapshot file are named automatically based on the name of the test file so that `tests/testthat/test-one.R` will generated `tests/testthat/_snaps/one.md`. Learn more about snapshot tests in `vignette("snapshotting")`. ## Other files and folders Other files and folders in `tests/testthat/` are ignored by testthat, making them a good place to store persistent test data. Since the precise location of the `test/testthat/` directory varies slightly depending on how you're running the test, we recommend creating paths to these files and directories using `test_path()`. 
testthat/vignettes/mocking.Rmd0000644000176200001440000003124415067547665016246 0ustar liggesusers--- title: "Mocking" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Mocking} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r} #| include: false library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") # Pretend we're testing testthat so we can use mocking Sys.setenv(TESTTHAT_PKG = "testthat") ``` Mocking allows you to temporarily replace the implementation of a function with something that makes it easier to test. It's useful when testing failure scenarios that are hard to generate organically (e.g., what happens if dependency X isn't installed?), making tests more reliable, and making tests faster. It's also a general escape hatch to resolve almost any challenging testing problem. That said, mocking comes with downsides too: it's an advanced technique that can lead to brittle tests or tests that silently conceal problems. You should only use it when all other approaches fail. (If, like me, you're confused as to why you'd want to cruelly make fun of your tests, mocking here is used in the sense of making a fake or simulated version of something, i.e., a mock-up.) testthat's primary mocking tool is `local_mocked_bindings()` which is used to mock functions and is the focus of this vignette. But it also provides other tools for specialized cases: you can use `local_mocked_s3_method()` to mock an S3 method, `local_mocked_s4_method()` to mock an S4 method, and `local_mocked_r6_class()` to mock an R6 class. Once you understand the basic idea of mocking, it should be straightforward to apply these other tools where needed. 
In this vignette, we'll start by illustrating the basics of mocking with a few examples, continue to some real-world case studies from throughout the tidyverse, then finish up with the technical details so you can understand the tradeoffs of the current implementation. ## Getting started with mocking Let's begin by motivating mocking with a simple example. Imagine you're writing a function like `rlang::check_installed()`. The goal of this function is to check if a package is installed, and if not, give a nice error message. It also takes an optional `min_version` argument that you can use to enforce a version constraint. A simple base R implementation might look something like this: ```{r} check_installed <- function(pkg, min_version = NULL) { if (!requireNamespace(pkg, quietly = TRUE)) { stop(sprintf("{%s} is not installed.", pkg)) } if (!is.null(min_version)) { pkg_version <- packageVersion(pkg) if (pkg_version < min_version) { stop(sprintf( "{%s} version %s is installed, but %s is required.", pkg, pkg_version, min_version )) } } invisible() } ``` Now that we've written this function, we want to test it. There are many ways we might tackle this, but it's reasonable to start by testing the case where we don't specify a minimum version. To do this, we need to come up with a package we know is installed and a package we know isn't installed: ```{r} test_that("check_installed() checks package is installed", { expect_no_error(check_installed("testthat")) expect_snapshot(check_installed("doesntexist"), error = TRUE) }) ``` This is probably fine as we certainly know that testthat must be installed but it feels a little fragile as it depends on external state that we don't control. While it's pretty unlikely, if someone does create a `doesntexist` package, this test will no longer work. As a general principle, the less your tests rely on state outside of your control, the more robust and reliable they'll be. 
Next we want to check the case where we specify a minimum version, and again we need to make up some inputs:

```{r}
test_that("check_installed() checks minimum version", {
  expect_no_error(check_installed("testthat", "1.0.0"))
  expect_snapshot(check_installed("testthat", "99.99.999"), error = TRUE)
})
```

Again, this is probably safe (since I'm unlikely to release 90+ new versions of testthat), but if you look at the snapshot message carefully, you'll notice that it includes the current version of testthat. That means every time a new version of testthat is released, we'll have to update the snapshot. We could use the `transform` argument to fix this:

```{r}
test_that("check_installed() checks minimum version", {
  expect_no_error(check_installed("testthat", "1.0.0"))
  expect_snapshot(
    check_installed("testthat", "99.99.999"),
    error = TRUE,
    transform = function(lines) gsub(packageVersion("testthat"), "", lines)
  )
})
```

But it's starting to feel like we're accumulating more and more hacks. So let's take a fresh look and see how mocking might help us.

The basic idea of mocking is to temporarily replace the implementation of functions being used by the function we're testing. Here we're testing `check_installed()` and want to mock `requireNamespace()` and `packageVersion()` so we can control their versions. There's a small wrinkle here in that `requireNamespace` and `packageVersion` are base functions, not our functions, so we need to make bindings in our package namespace so we can mock them (we'll come back to why later).

```{r}
requireNamespace <- NULL
packageVersion <- NULL
```

For the first test, we mock `requireNamespace()` twice: first to always return `TRUE` (pretending every package is installed), and then to always return `FALSE` (pretending that no packages are installed). Now the test is completely self-contained and doesn't depend on what packages happen to be installed.
```{r} test_that("check_installed() checks package is installed", { local_mocked_bindings(requireNamespace = function(...) TRUE) expect_no_error(check_installed("package-name")) local_mocked_bindings(requireNamespace = function(...) FALSE) expect_snapshot(check_installed("package-name"), error = TRUE) }) ``` For the second test, we mock `requireNamespace()` to return `TRUE`, and then `packageVersion()` to always return version 2.0.0. This again ensures our test is independent of system state. ```{r} test_that("check_installed() checks minimum version", { local_mocked_bindings( requireNamespace = function(...) TRUE, packageVersion = function(...) numeric_version("2.0.0") ) expect_no_error(check_installed("package-name", "1.0.0")) expect_snapshot(check_installed("package-name", "3.4.5"), error = TRUE) }) ``` ## Case studies To give you more experience with mocking, this section looks at a few places where we use mocking in the tidyverse: * Testing `testthat::skip_on_os()` regardless of what operating system is running the test. * Speeding up `usethis::use_release_issue()`. * Testing the passage of time in `httr2::req_throttle()`. These situations are all a little complex, as this is the nature of mocking: if you can use a simpler technique, you should. Mocking is only needed for otherwise intractable problems. ### Pretending we're on a different platform ```{r} #| include: false system_os <- NULL ``` `testthat::skip_on_os()` allows you to skip tests on specific operating systems, using the internal `system_os()` function which is a thin wrapper around `Sys.info()[["sysname"]]`. To test that this skip works correctly, we have to use mocking because there's no other way to pretend we're running on a different operating system. 
This yields the following test, where we use mocking to pretend that we're always on Windows:

```{r}
#| eval: false
test_that("can skip on multiple oses", {
  local_mocked_bindings(system_os = function() "windows")
  expect_skip(skip_on_os("windows"))
  expect_skip(skip_on_os(c("windows", "linux")))
  expect_no_skip(skip_on_os("linux"))
})
```

(The logic of `skip_on_os()` is simple enough that I feel confident we only need to simulate one platform.)

### Speeding up tests

`usethis::use_release_issue()` creates a GitHub issue with a bulleted list of actions to follow when releasing a package. But some of the bullets depend on complex conditions that can take a while to compute. So the [tests for this function](https://github.com/r-lib/usethis/blob/main/tests/testthat/test-release.R) use mocks like this:

```{r}
#| eval: false
local_mocked_bindings(
  get_revdeps = function() character(),
  gh_milestone_number = function(...) NA
)
```

Here we pretend that there are no reverse dependencies (revdeps) for the package, which is both slow to compute and will vary over time if we use a real package. We also pretend that there are no related GitHub milestones, which otherwise requires a GitHub API call, which is again slow and might vary over time. Together, these mocks keep the tests fast and self-contained, free from any state outside of our direct control.

### Managing time

`httr2::req_throttle()` prevents multiple requests from being made too quickly, using a technique called a leaky token bucket. This technique is inextricably tied to real time because you want to allow more requests as time elapses. So how do you test this? I started by using `Sys.sleep()`, but this made my tests both slow (because I'd sleep for a second or two) and unreliable (because sometimes more time elapsed than I expected).
Eventually I figured out that I could "manually control" time by using a [mocked function](https://github.com/r-lib/httr2/blob/main/tests/testthat/test-req-throttle.R) that returns the value of a variable I control. This allows me to manually advance time and carefully test the implications.

You can see the basic idea with a simpler example. Let's first begin with a function that returns the "unix time", the number of seconds elapsed since midnight on Jan 1, 1970. This is easy to compute, but will make some computations simpler later as well as providing a convenient function to mock.

```{r}
unix_time <- function() unclass(Sys.time())
unix_time()
```

Now I'm going to create a function factory that makes it easy to compute how much time has elapsed since some fixed starting point:

```{r}
elapsed <- function() {
  start <- unix_time()
  function() {
    unix_time() - start
  }
}

timer <- elapsed()
Sys.sleep(0.5)
timer()
```

Imagine trying to test this function without mocking! You'd probably think it's not worth it. In fact, that's what I thought originally, but I soon learned my lesson when I introduced a bug because I'd forgotten the complexities of computing the difference between two POSIXct values.

With mocking, however, I can "manipulate time" by mocking `unix_time()` so that it returns the value of a variable I control. Now I can write a reliable test:

```{r}
test_that("elapsed() measures elapsed time", {
  time <- 1
  local_mocked_bindings(unix_time = function() time)

  timer <- elapsed()
  expect_equal(timer(), 0)

  time <- 2
  expect_equal(timer(), 1)
})
```

## How does mocking work?

To finish up, it's worth discussing how mocking works. The fundamental challenge of mocking is that you want it to be "hygienic", i.e. it should only affect the operation of your package code, not all running code. You can see why this might be problematic if you imagine mocking a function that testthat itself uses: you don't want to accidentally break testthat while trying to test your code!
To achieve this goal, `local_mocked_bindings()` works by modifying your package's [namespace environment](https://adv-r.hadley.nz/environments.html#special-environments). You can implement the basic idea using base R code like this: ```{r} #| eval: false old <- getFromNamespace("my_function", "mypackage") assignInNamespace("my_function", new, "mypackage") # run the test... # restore the previous value assignInNamespace("my_function", old, "mypackage") ``` This implementation leads to two limitations of `local_mocked_bindings()`: 1. The package namespace is locked, which means that you can't add new bindings to it. That means if you want to mock base functions, you have to provide some binding that can be overridden. The easiest way to do this is with something like `mean <- NULL`. This creates a binding that `local_mocked_bindings()` can modify, but because of R's [lexical scoping rules](https://adv-r.hadley.nz/functions.html#functions-versus-variables) doesn't affect ordinary calls. 2. `::` doesn't use the package namespace, so if you want to mock an explicitly namespaced function, you either have import `fun` into your `NAMESPACE` (e.g., with `@importFrom pkg fun`) or create your own wrapper function that you can mock. Typically, one of these options will feel fairly natural. Overall, these limitations feel correct to me: `local_mocked_bindings()` makes it easy to temporarily change the implementation of functions that you have written, while offering workarounds to override the implementations of functions that others have written in the scope of your package. testthat/vignettes/review-text.png0000644000176200001440000022726614164710003017126 0ustar liggesusersPNG  IHDRX;> iCCPICC ProfileHPS{o: -! %@A:JH %T ,+*`Y EYu`C^v{oޙs99)ιP"I @X. 
d$%000P\LŠѷFbU(tr y0j7'miR@F8sp(c4G}b([@ sLND㐣QvEbr(yܹ#< /BCfxQ!dh(sX,ŠZ=s#T,N5"( a̓Sƙ P͙9*7Yp8KƨreH٬qJ'*Uv_ K|QqeFLUv"FU@87D{/8ra\wD1k",IU_<U$9*AN.ˏU핣hfqãDP ȁpA1#ͰJHEB98#9Nf8O{$G%D2a[Y qRyb~#i'l6IB?fÌ` hC8|@ ād0䢕rPJzT`7gEpw# zK0ރaBz d C.򃂡H(JҠLH )EJ**]P5t:.C @a&4LGq,8΃ Bx-\W,|w/! j1E&F$"Kb Bjf t"' C00L&a``*01 [. 5cl6;[-c/``{q8gąqY5:\ ׍zx{/> E3GB!Ą2iMB/aI$z|:b3:8L"Y|Iq,rR9tVMMLKmHmZaKj]jd;2JVג[o)%BSR)(O)թuRJ4,e4k h54ٚ\%5iiQrhкէ׶kj>MET6G]IC@h4-VB;Dk h$ש9IGVt=~~y$$Փj'ݜA@7@W[[{GC/X/[o^} y;/ | xG †v1 w^3226 5m5:g4`L702d|ڸ߄jg"2drCb0a ]fff+̞̙[-L,Y,xhIdZ -XY~JZehgkͱ.~lCɳmefna۹ *"&'WM@v`9;8t9#W86:b1%eʆ)mS9;8qz¹ ϥ+5ukk7{7Ti[ݿzxzH=j==-<y{x˽zsoT=S}||;~i~?uslYYWNlobvKT\4,$3&d0=tahK6,"lC=ǩ {/?Axi)lO q@'jcԓh3p3gTx(-;'@uqm  K;$-N,JnJ$Mv~^}j[W+_)q*)+?\}Ǻqn߰T{㴍 7'O;izΩuI O+j <:ѹs8~!¥!ϵ\tWWz\m~_={^oucj7 u6;t܍{^}r~pѲO4=5|Zu=}7o_z SVb拞Ekͫ``kk5o{u(z?}sy__ʿ~m2WpQA}| !H3Qƾ F 'GZTFjEqvuU,e,.ʷF*U*+_>%olcc@ߏ )c3_z#?*eXIfMM*JR(iZXASCIIScreenshot,Ռ pHYs%%IR$tiTXtXML:com.adobe.xmp Screenshot 1384 964 1 2 @IDATx]E'=$^@: ("XwUuweׂe]u-®v^(. "C!=!9s͛w{~_Nr3s9Ν;Z@kZ h-Z@kZ h-Z@kZ h-Z@kZ h-Z@k#>Dg٠_y]p`y= mɒۦTZ-O9LePYJ#XWŧm;lSiӼpK<|J#XW%?m; ?47@# !KeЩ+ހU-nHn s#̹hj Zjm7d4sƲdUḞJǰZ_|ǣ [Z h-Zx601k~x{D4Z'(?ţ6X єI7 w7MW]:|S48MuiZu0HRJSYidZ[v"Mw0S)n@4S~qP A7HFrL ]?g? Z&6 achx@+w4|*ւZ h-ZZCǪ7ZQLH*dOiЦAFZ B. .*W,by)]|s~]^<_gx9}Js<_F'٢)^Nҥ/ϧi:}ZyaMs< L NNӓEh'&*E0+ͧ7/}:UFt]Sc^bS:5NJ8iT'qGRDHߔhrY)Tj O©QE'y,݈ ]"0t:|SMSxNLIFNl}I<sC'YpMӒRqp`&:'UobvnKz5R%˧t 9+0=Z'`}UyKUi:8)C WшvP,4҃hZ h-0a`\`*CҊso kB߄ 'KeuySq2x\_iʔ'-^uVl\y*rR/:n)ߏ7+ocn F*ocj8l;P?FĄ^"UN. 
='k:!#/9*WI7W}UQ2VrMprA"*S Wi9vaIlɄu9ĂA$x4eCS6Z@k6b L0αx+8q+XpLtuvZKKXiI+UyI4HV tR8]^XiXi,+8rSiB./ʼnݺ OS.׭NuOҵi : 7 KqsNh𬢓,p9<ߏNeEKҼҊ('~cpS x M奲KnȶVU!ai^iutAk(!mJʂi^SMl~·tHӢ8No-X`ŋnN”Li)LuaS\OF)GWU”Vb`)+=k֬ooyxGfmvuûkʲOˀa2/< V%pv) ,xO)*鬊}.:y╗TEۉ,գGutq_^͞4C49dQ!b3:(P ;HzJǼ *ۅGryRr~)e/]vk۩оôEqWeo!ɰ[oxǝf5] %taҤI뒼QN4Ep8e/Y;V{uhM{h hD4m$]4 ⓧ0+LqJ+X߸C5&/OJ+Fi/]Nlj_ '3f׭sg|Zh ک=+3e:@K'-wl<wW'$e1)?sm H!/?<ѹDOz&9]Ro=kz8 F (Pʂp錝Xgczv5ѽ_ہf̛%|rtt1λ~d hmMe'ۻ,x#u$Pma\CyH6̸R6YhkHm)aOah;ek./)sBi^tI^ ~o(/{(/">,^HGm'o?N;iycCXҔNO4-FEHA]تaO|:!94_,t]~W}ٻ>)O;8;{5kzv&r90\*蘅0Y/3`K!}b)yㄙTi=K} ]O3TG',YA9Rtu݆1GV&J5-"ijV6ڶmЯGDwzmϖ]O_x7L2yOJ\3}ɾ٭G`c\^Q P m_:FUCۗ 0U9 S}H q,^G[vTkZ UKvrznȹ`]<+k-Hjh ZZ571 A&iR%Q>MUF> )ޓ!6@U)L ^Vr:*OJ GFbƕ]Ñ${Opbgzj‡ܱ4Co0qk֬z{e} QmfZz}}ڥZ`4hC!T0sq^8i& Ҭ֓XaÃI3ag”)Spksx:Fj -/Z]Pnr^8uLLmY>m1Ne ZU\r eqz*hqsLʱLc?)$`3.@U7;R&j8-ܴx=>N5/If1xM74lboO(n|N"D'Ấ qf`n\f|"Y&Рwl鶉ϧfB&񑣡gO Usϒ l Ms)|yamg6XF֑UP8 */\x0юy[N*<`i  ,PMMzbax(;"̄nQN9z`'TbѺ$h0H Kǣ=#lVi@jjV0y,{alrXNܹsHx;O7`X"tM;tgSm[&a-D4 I/B%7n }xM}e~nY"ZfjFxfΜwj쓑Ыԟe˖km\-lwö \3l̯6oya9۸üAEsƸʱjWJ:#477p(q$5*O͝lMjΟOW'Σ>N&<5[<^GV…wͬ1ºP 9oy (P8xI2҂Vli`I|SxPO0+N <:e2Yߥ@yEЊFUwqgˁYI NGEa2Á zkwpbF젛;j˖/ |Ke}ƞӲcbdΛ7ם+d<` S$5ў?N+*t+6kXuaܹ>JW6o^57[\ @ ݰ4( o;7!^F hWkln90X9xcX2m;lg mJ2;ENk,7ɩnM' JigL:uwF]K:ʅzM,)àP\HcʁaHwbSle`0&6ɜGxL.x=ҩd$[T<Ž&tM(hȖ޷llM83lYiEKpSL gL7iy99O۱L:t ^2^o & cekvCmCkf<߲eK}=k?E4+Wqo],ᔎ-R|/0D4hປB6. f9`]h7i}A8#&! LNbc`{<%v0Eww]|nP/}b2mPhc퓬n< @n{aDy-ַzyϰ Ƕm }8lc0^ {o N;?w㊹lJz\dK5gV`X*I9[n [ns.;o=\qEo~f͜7Ikl^=)aԇqτ;?!j ۘQU$Fvc~s0ŜUɞ%iaĖu>Ϲ !u(ֱBK85IN"ї]Mά.^&/XD"s\Ai4RZei\^ ? 
>)<ϧxS\ylG>ʞw1,r~jw27ϱc96<'nmD8qQG/zRA]rsݺ {3m|sw_o~3lkW^^CPϡ/`Mϟ}.o;/"7o3}#ΫBP s&Qh/1b1Y\INzGyȣ3VOu6(M\ePؽhS311c(-b n~._tN*[:>oup̱v!csy駝fu;/^:*?4sm@uvI.gsN+:Y, \_R:ѓ Zԛ,~gg(^UVvw1\w͵&#$/|qz9Mo|clEzы_&)FVWR:vQɮLI~1#>==U'B!"C:zЁ#mp0GxË1@{a]pc?s77,6LӶ, [p#sM{pqؘgس6Wno7?f4qxݲ`%IԩqȘb<}7C2 RV,/WމOAAM<te O8aiNNGb"~3a[;Ɨ@fpG+@П;V&٦z;,2O3ww@h/&Yƥk8n8X|8_ ifLӟ|RFv~Y,؅k w}WxǸ>baECާVw??b;3^(Al1d8/{|9S|x@Mہ^\#ecv4Q']`@CHڜvִsluYgy2v'ZTA"?2_B fIwwkxӞ}<0WvS})O~?Ķnؑ?viG?SF<3P/*w9|1Xo◾e]滞k]ޗqpԯw{g4x1|C7 Nŝ<Ĺao~s8Yb'3 ?lm;n3̰^{9 w| s\.[-wKvN;#N¸|s: {Ɇy{y+_+iJt·#7w'>a7!_G_{^__q!g؍X:(E<.lAĆbE,<9X.[f ix (/`Z G[oC |ljԇ;lsvο׉@c't/ yfCW#a"ڠN+e(ElNˢ #Gom^c^ġbO]Fv c>`UEe_u]TQ"td-g0ikmCm^{z_KB7&KDÂAy(w =<. ^x=aa]W۹1^pcEhzKbOlHuiiDXbx~qh$mxYK]%$sq#N 6xq9FcIC?`QG.bOL [oLH蓟d/~ / (YLpXXycs9y\!'?d~ϰ#;f}\ zs9*ۭe bSnΗn6_J= E*/xze4(8-*=+S9|~=5üYĥ'c1Xq:(letG*SЙ!Xe27a]-`T(ql7?x98 x17p6^78{(gc Eݢ1@>0yV߉77j{85 e=}IKX̥ㇱtàmo,Nq`w:ksڎvH9Sn+~+~,/Lgʙ*Q6i+ՍN(sԸ1%`@OF=f{嗻k-dsQPB(›Po:2 S.,,ZJ%֙3rz%zM cp0ɣHh $!zv+e?0[]koyYL'|rq{Bon}K 8XL/:Y@3v(~"O`'3[,:Y_ wG# ԁj6sεs}-ylғ)k-U T&p&o$"I%vQ]w"K,,w?5{uY[1j،>EB9Y,d| s$h w3qqƇ>!āv/?uǙ*_ܩC<;=8LW2ۏ;a<K}~nqG=*|ӟ5;Ns _z-^w;w󖚳U>fq㣞؄^CL/ (y i6N@>q6e:K¯/]6v,rz2O7.o*v~} rt%LXai#zΰvy:pAzի &`hO:KVl -)xJ?U[?VQߴD)'/kg DNx2;1E*[`Q-sө;0s63n$ý%% ?:Nu';^GgK{S闿w2qzw{ܡIH-;P\ogrΝ "u˃4`Ӡ5dSOLJSh\Qi]?_6oqg\8[`/n/~׳q +ѩ"Ti<1vY`;8(f;J!3oΈF_r| _HԒƂ,\0%[ueQ`;wn1;8VȜi뮅8;i&7p$Y|%8䑿Չ;T0`YO8I`,j}K0{[Ҫgȱ &ƨhgZ[me-Ri.ۻZ%TZS_Yw(jpRJcc6aͲ5]”e ]{􌍷a{cBǑya81ɷEq̜#ejM62Xx2e\kG4p#4˜4<ܹ\e0cQd>;g3.;"\z=|{ok?!Ɯ N>NKq(7Y:Qƴ1--Z"'slevx{Eڈd.p٥hͩ}"DٱUy= T?q}j/x0XSX"[d|0o7] \!O{etS\b$ю<2$`ܜ|p!|:?J7?eLG(3sYy`Q)H bK\U< )] oIzJ !`)SHrTF^?>wZev4tb+4|8YCwX] {@`BQ%ܘz->_c[ޡ+(Gg:)V1Э$(? 
NtvMi$ %f#}m&,:yMW-ζhB|Ⱦإ/j/∋^^C}Є3Hx@Gߤ\{u'+)gV(X?j&"&ByfLq<IJҊя ~ZK9/uM6MT&.ջߓ{߿)M{αM pܩm$c9X* qԀz|0V*P_PyjWZʧVb{si" ]uNDcQE"ĉϔYMBc\QxMLrm(ۆZſs zЃ-xboq"纕:v"N/OB Q~i>%E>tIRa_v1!{t6zh0`~1p9.fS006y OIlp`@mP,pۢ^v*d3eԅ)p6BrA粽Q;A'v#'Z^H32:tE E]6A3v(yƠ꫶z=div|\{61L;>nt'ta6z9lJnĺyzu`}}p fٙ4uG:xY#YOY$܌uԍl-%.qV~} 8`w^QyܝKioܹvLvl\PO`0F$VĤ7w%MW* nyy霖2iR#L0Iі;XKM Rre o& 8 R.ZLy>ƺ~dhUu͵CcEvno 7c;^`ꞎ:lḻa c$Py2'"'?~yvnԂ#`]y6wwJ u+:BQ"I&1G7i˥[GJ%j "BQMdQ(ͣO9hMg8 [, VE!ɯ̙xJeDWzOY%!" |+S*Ki;V onp3QcFsiɘDtezDt(O&i ;к2ӱ˭]m6!ʩasǭnt5=l8juݮv3J<&;^ѐtaNS.&3"MJUdWIv!g)6)Jn-c=enԱR׸뀝N/9Dm9-鲴>QK 沤>i-#`dGT䩢seYRXوϳP[buOG+`7÷Uvrzf|p\&zI2#U%Ur JhQ%+VYA"_ҡ0p:QE=t&Awy 8ҘQ;/qo /ԟjtD{;QsseCOAWp KERx \TˣH9[Շ: mDn;1K.@jKծЈi{~W6hl>ZѪn@u.nx€291{}XƓQc`yzb*/x5"ߨIN6(X:u)Ktk.e4N.)M6HV3)\W. >yZx|Wl lM:Ɔo Ej LEgbrn3LKs+$k*VQ> ȈԤ>KD/stKkB_s>,YdtҒmeWň$)i8cKdGr IRkN <Jl6QH
    LvCʥ^ ^W: Ё^CSkd*UcM_~γ)S&OwnZ h-඀-8˼iMp\";Z20.G.4 )婌AmGL)YG:_IajO Mt lKX?C1xaK: .~W|Z h-Z`[=RmI)O??^禧Nmѹ5>_S_ `,U縂\9Xb"< xZ| |x%0DUâ ,MZ b!n}ou-p}#:փx47uҴp-sL!娝 ZeS$)l ^ӧLlsH8~'3fO M* \Eb>2ʃڪL0, ` b^UѤeiZU(\` Dyojwd6n-Z8sŋ 7vm0cƌ[o5wüyqUz]p--7V^jYt>ZE+ʵ_`MuG}6yzE |/D*@sW4y/~VJӴ2*Kai 'Sp~Wl']PZ h-Z`%onى7o^cyj0 p7f!c餭ԉšI>FVipV[ b()v,{%ĺ,x4eU Oe5g٦V7U 1eF x N#=i~dM JZ <,zM7,_,-y[~+vAҌt/@犥K}S\`i>MeA4yyBUϛLXK'xjoJ Ky9<8C4G) hpJXSJIRR#EY nf[f9YaVPL? 1 mZ碪U*7L7]ͫR X;&J&Hy;p;ɠI`r`ۼ\Q+4(/˝lCkm 0Ƴu[ZbϥqH QowoIM3/?K QHV٢Uw+oSv}=> Kaiz4zt*^5JqUE`OcMiIy٨ $f\`2 -*~9 x>}.|͞y?)Yk&{618i+W}kxE Ç?pXO=T*}χ>G:K_p)NR";/Kq(۸@koͱy.5%o zի‹_/}),[s熟'^ sn#\sgG8aRwp6wp8~މa'?v|Sk~M]I}>F48SGiNCz=غpDžvzR)4([۳YWd6ŃVSXʻ'J=;I0%`XnN4*w3X xUrR8i ZxeL>qp=EL෸/) o{XpYRdpwEᤓN ᄏ_oy}tRxt+_ ￿ R: ]7eye#ͷh-0,JAU4ƼQGG?w~u_.M|c79>YCcofMvai3A;L!9Pqo-S~ٟUwDٲ5KuKn\pӲ+JqnԵUk]nRõz*û*ܺzvec3v_CvzT8|#Å~>o.RkbmgZ j:pR4]G?H&cmᑏ|d9ЮpWSvvn馀sW@~#j+梋.=#ホs衇zwG<ةKCgؽ:êWtNw3 Ò%K[o}Be]JPWZ>`s O{Ӽ^"t0sL qF])"#ĹkJya7:`/IяzT9e%6ZbMO|>̚5+N׿<9|c?r ёƿ/uw9?sج32>ϛ7:[nſc|?g~Gv˰e'[~Ӆamd5_'+:q_t n;۝]6=d߆3fۗ~v}o m;ߜ| Y709pXn?doN}>v-?p?Ù4ar5l⧾@Gp)4=<{zPV#*QZq?Qiɏ7}_ O|y<H~~g ‰z^Wso}(b?^s oܩz[y.O};  o|_~3wQ@ /Kw _iӦ9vpN8l-!,Z]W^wl_|;ulv~8|gq;8o˻ÿǓn?'ѫ*G?Q :b@kqg>%/ G? OxrhKEt܅SO}cn؝gCG0nĩb'}xGx{4ƙ;RpsC8岏%+.aE /0gmK}M8f秄Ӷ o|;cw?:1K{m1u{!t9LJ0kYa&[^65\lǣ^-Z-Wc1Q>Ղ*x4ݏOU,*k*㱏}lx _~l1pxFϝ ;;_ ^:Q'?ᎋ~O.w}nfm184tҰf~׻߇m(.0 5b|տ?mye8 cOk_Z+=#c;0bs9'??Ozԟ9s攎Ud2r9Okr$1WpC\7w.hk!bGK.s|>u<srOowa'LG>w89:pyc=үufam /&|opnGyVYtaM؏IoNYai}g-wݏ ngmN[MZN:S}|\ן>pml}%S3On֏Ì3{nw~>u*M ZW+_T+U9XIJ5W~:MKrrbvw;.h*guuFꉃ{7pC;ݎA#0qjqFٴbƭZ G bgLo|}dk\j5;A=Kg]w3O84_}|rG/ٝ+Wvxz 3;s:#Cȹ{VtB9}vՔpՉ |rop͢+f¥. v挽燻%d{. ]᜛NaV6p_<VYmXcNo4# 5᏷^.OsY'MzQǨWi+ի7}**^2Hb")\i-u*}ŷQB$Щuj[}džG|<6c'GMp;0lsN`?g8XVw>ǎꎣD̛7իV.7m$33.^}Gx/7Ͽp없S O~UyJk~ᛗ~1п o=_{|wׅ_t͒}~R8jc?~'ܶ+^v9I/{?}+L4w8;Y-{YWm^|k\p-3i㧄|rAy֯Ui%\Ĕ fqWZy.J14&=dofv{0mt;복{MmۂӁa,;Zrp (gzxdwT!Qc8٣78Ugo!NNw_Sv]y`p9ԇ4p؇a,Sj ii[GDX~[k֭[OioJsgV]X|aL6a%aYޛ@uTgͣf=ͳѲ-e˱~ Bd̬fJȟtd%E)ϐLdccl}Uk];w֙:\OGzݫz`|mJ{/L羋ÿ.zfϦyh}? 
#SmgD)>(9Y\y !j\1 A]k}aW pk2B\x ߢuqAhB6?kihY+?X!Z6+˅,c!u`.K&[n*[)PgYIod%%ɥ>1j(o ס+no0,aO\m'z\Y쐺v[oy[H;>ru;ME@ ʥkn)u@bèMp ݉ȓCh~  W_[<`] Ϥ\nHIN*Z( uұ¯xX||PA`I !1_t4c$ WI-. ϗ)E@Pq>/eۍ} #0ʿr} 7(c1>g>*Nn(*2r )9@"YX~N=_P E@P1Gs<˅"kz׸ܗyS9#ωy_׉4i.-2Fqgt#uz rqB4ev2c,ELAB.uEFn6,·gޤ8Dg҂"(|NPeA#op|Apn61p}o¡{c:9sޕϖC:|lZ]*_'qfV3MkM4ocz_@9Jږ}LʥF9zB@p\V"0>D[h͊RE@`Jp%R|cA87"3Bzyƅ|{\$ZezPHK>;4%)m}P%$FRqour:?GE0(ґHE@PnٜYi4s:W3cڃ϶D;parj|m9p˞LE@PE`"rPE/ו^չ|kK5Al2aejE@PE@<%Jõ9,QZ@iu>_JWE@PE@=?U/<" R %XN*N"K*!Kl5UE@Pq\e\r%{ꓡ>In&q['" ­ 5+"(q?N}'ȓp}'Cg| I&6T@_& |VE@PE`?I)zvzC\) %Xi ٍn vG}>#pګχE@PE:@ 1U >$/dIlC V "Il,&+>bh]rsmLΚ4q+Sl\hTFY(llδg, qQ b'*vbct-h}h#|~#fXأoH R).z},y}P]4YTb/5"[+.%Cf n\KڰSTz%+MOGWכ\Z!ey_TRVc;LbiBQ)qF)'jCJjr~9I>٩/ꌭqZ$6,=S76Q{FJ>; "є fčDi cڑzVz;&-J#}#maNTlg#?EH"J'c۝=X#2EUj[uruA4Ä4|d*M?`&)dz{]?sz;DnKڶ[RQsːD_I3yK9~,R米D޾~#E,yi'.EH8^ TR>D&il5`6NBܦ(JFRo>^RWOqQ+Rm߬#r#2HIRӄ)s*ckX8G4%ilՉ 5U/z4G/_xCŵPg=g<QsMGڷ][)#ۍR{IFj֏uv=mźp&2{O 㒼ԉ"(nѵ}y;ں2|Yj.v{I6q[_vٶ͚JNf G`΋Iٛ2M3y5֛<{T"("06^`f$KNtBl$}'m6KBK F>K'\a~Rl&,\d("(AƓ^$<6\N V *} 1٪իWLBCE*!"("::Lx/*Rr[q/~$M %XD"/eiy'u,I.JꓞL/ Ȑ-ME@PE g067?N`sp|vi\l|2/D(T"z7ou]Q9b0("(:1R<XW)Ϸ|%PUbX&j 6~zI\/"B="(")]׺ҞPۂ_?+*UE@PٳDXi?lΑ$9 ~b(J=$V*+i=S+ @E@PE`t1a`Gg%XY:!n's󢟖.X@E@PE@ V%ck#eI΋Lԉ\"t@r_]<.уЎ0bQACޮKё2tlkcFſBGdVǸ⩠E@Pdq݁DHl/I:\ Ш}e.-/C'Gptn:^noۀ>$ՉMa'2 Tk("p#!$ɥMF򒺺R6Ó4.V %X 8Klj[&,Y :FeM|Җq# W677Q"_W_OW.3n7U /R(11uц~-,J]#~LqеAjnHOwD v 6ZRE@yptnd@I 7:ѡP֩Y` r Fp|~P'}NAp7'xnJMMM2 Д)SWW aYJ#YBpCooЬY7H@IDATyQHEHjg:C=/݇'J=Ef@oqg}'%Fɘ"e8m}Zl.×?!W!$]Ofۤak'ͦ.`e, NV~L9M&^0[1c 9Ro}[O}O,bmb4ã^bYpǔx5 籄,.p t\ơ%vj[_qx]Y[^ 4[|Oos!2)K灁#z񁲝~ YE}yd5Ƌ&M+V)۩>я'?I?$ n/6^ڵB]~}ŋ0D~B=aQąwRCtVmmts4w :si-Y~Eb3}&PWY- tvCf,.FC9&NNݣQcdֽ>i>z櫧3yM SSE@P|;$d_qg43sȑ#4|S> a<Ƙn:{,͝;|nfٮW^}b\[M_q7!~\v͍Ysnj|z`|jbH#[5skj;]桟d$%> x?>'> ڳgyC1`$ŋ/__ѿPwOY~?5~fo˗/g?Yz^Gy{]zm߾ݴx57ʗ%@M-;w#Ȍ֩C?{Anl㇙ mcDŽ ]t@6:쾰˅C197:}Wo ^2型GiO/gO:g hW?xr9-^^ZthSoY[f,~=-&g?2>%fS?"/'O0'y/gΜ1(Kŋh|V?]p|#=zɂ. 
,+Gyg=tI'N4})3f 6)['8>S c9a1~]hb{3=nX0Xav>GS: k?|;f?|%:yj`B@O\Җ]f"Mjo{VϤvҹKfV mTz1΅RG>;ۯ/ xa^\.^o| y:ַ.]D7˿ǎ]wE_ ڷo}Ao ۱cY7I7nҼy̛7|#~DmmmF6?Xḃ~&6+?x&kg򷱫}Y6D 3R-hәOФ+ ѧ+7o<<5y5Uy›pa O[;<A:hs]3 O}O%KPp_~|6}-=SHxٲeٳgӱchDa.͋/hƐCѪU._=J===K;׬Ycbg>V2@I>ᮮ.[_zY /+W)X%A7o6D+'ȟ}?HHKfuޣ ez^l_wxqi+/N_U<5y۝3&0#:x]2@Ub.{B>e"h1:vvy1*N~EE9roP϶DJZi}6GPGgJ:%mB~7^АMk^ć dl̙4gGw;>/~}lBw??o71oۼ,X`dF>zg|2& BymA4wu׳Nr{.좆晦ѧhE˥S?ٮ󛨟]|'^k0߾NZ2y~[Ȉ q>~fraVS$rT b%ªFQE3d /Ç~] J//m۶u[nŐ)|y-ZD37xuA lSg=.@d͘/{ӻhڴZgҥK#iƊKФf:3Qm?F{םl}d"ާ>I">˾=F˼^'x=KsVmyehi蕟Lp~ly?cƉ\A}n6bkz)"P+_f1rµh"*\H9,2<>e A; i:u}B; EЁ>o+|m1`.Cɤ˗/7ma,Cl&L0Kدgt˂.4L>AN_KgP> r7!T4nfkzl\ojnqkZcCLD 2y<1)kkm2.Rg;1?!Hj*rZ(`!NՕk?^-Гu2xA$FT& )kx ff7e|SBo>.~#?n`eiބ(÷M 1bӾa{/Qټ)09뜲cSNi ˃OWPGff O>٬WfV KuXY%,%_A_%2a.8`f0Ӆ1}\OВٓ+4gD^ôIx+O͉JD?K"*;"|T ի͋/0\x1?x3`}׽K[EwMoݻw?A&÷#7O~WʬYD oRLSxFnʳS5Ѵ90cL^gHMkt2{^sukɳ0VPF>a-yyo!n333ul׷c Zs<[~2vg^b7uN]ot$ dk[v5(X!!f7 3C z̾,k cLf0n:yi&Cvig?ڵk=cÆ tEC0~̙] Zxra+-",IY2HeM6n6lYX %7Ǚ/ ?fՀ!@vZx65P8>睿~ڶ }| O䫉g~egYRZx:MJ?_tД -WYҟ'ܥ: ;, %X:J=DrU"B R ɓA-%45, EϚ_(~]-ׄۯ_M旇 zx͇l1..l?u=<|c^[jf# +M0c?既=.,4=w7kWs05xmOn_ő^Ʋwep/!t K|n[J[a#U(Bvh-ۍ%j|x`~̋~1MbSB~_gDž7 l1+o֖VzIo~˛ͯi~F3/lq٧Aghy%3ZاUUx2G9,ں7 9 bC}TIh(("1 2+P/2 q@>#Qxc̆>nv{&?ƷUXf ryxH >@f߷=賌?|+-,  .I^5R dR [2OB 3b,!ꡜMKOI|I/!tg1Ӯhez+b77" 6 Û!ˎiO'{t4[b6}J5`XCS;˵Z',2$-E@kq@b)'zIa'y5QllRN+e_jŒb 6EuQlYv)|&w &mT5mbi(ASӑ=\DN]xv9- }y(!qމr=lVǺ|Cu|y:)\MGʐI Y V =. iڼW."^^SlS;9ŒĢ*"'xl3c;'`f糄 AJPo JpX%6@oe0v9-(`E"w6*}6>k[(y6K1w<*!>S%ᨴ("gx e.wpnlеˮ[΢ښr(*   &:̫"67%;\idFft~ {d/7_]`VYE@UDXQcj("(5G' r W%SbWI(*1Ҡ%JB?&I.Ks~ME@PE`I%@*ʤ>r$)M<V2MO۸>NgʠՊ"(uO`GtM914-Ar2$rZ("(c;vFaAs\fEYRH`I!,2Sz%yKiOtm=ağ@"Z7C"(cAhĉ0Ɛ4G1;`b86bGK 9cn%xUCH$(PCMEFˉL1w$ܣ~iٽH+"p}#PGgϜ9m$6ˇ%MK>}2.8.J|Ap>y ZH~C@G|AVs4GKaN!đM_(m-DA55a@ @V8!D< uX-yJPq|28qkk ͛76D|Wq0aM:PO/\g̙ÐJT3t}! 
'OٳG&.'~~ZpM8}XI>]+Fy,?腐@˵fE@aA04:h䖡욍8(`#vg])3ivOӱeh.R;& .!]^$CGqFV]W^mH5&]3f B Bp9ڹyZd)3\j/bJ6mЎ/Б#GwCJ<ӑ\td֫,qa:v-X^$5K_m۶>D ݽn9400@O>sC:;;鉟=A7qvzI|"./᭷@ċ;_xNC<\Ƴ'{y*dzBO /P'/]8,'&] m<TJ7Z~:uzf+D \>Ӟcr-|ѣG^nܺz/ E{*cO˲%&^l63}A-dĎ%N&~l5%p͋y6{9yҥmwyC&ljI~C|`&g6jnYK<8Ν?O 73_C("0N/'`VBI>1ЯyvAI`&Txi,b TtY:e!*=+ͤꕫtqZx ?w|IԹs@ :c2ttDd8a"NOq,asԩS.ZDS'2&`h6ϝ;,H2dџ'|%mxUQ(|$-2y =fj:yG`ҥ>Az%BB,@B0ȱp}f?VW 5B^}z^cw%aƭϜ7>2.#.s/%&o yrQtM]zr0cf8 F,bv%:DG,L%X,ٳk7o i^5k?b*[X >_l9/k~30p4o$tXSE@Pş{y|b3N k+͆`!S^,)i.LE=Cxfidr,a+=f 6ˈ8c K 3.H #l[gn03kL0ai oy/g͞AvIlM$ 9fΛ$hc)~9R X= U˜ׅuA8,Q.˺m漮sLNy'/N__if1s5AP0m~dpI_ Rk[a)p'G=y\3m"(2VB8PT3-@P$">5 R)?~ن NlZx1MSX {ߕ4vF3_b^Bvl2@Yb^Ri }.,a@?@!x0*=6!y9vAIR}3IZq 8~<*{dR8wyIQ(8A?cH|>D62`t-Om(B IWZm#zvvu:߷"-Ts ޻Ł!o#ŕLPqxVOSoo|C NtRzJLk(9 $_%6ybc?_nϗE@O8A=mJ ]O]'vLAf\B: —Ŀ%^.mNYM|@ YM|~Jr/V7aʐLj41F tb]VE@E #1"VF'w wSu4UyH|: y1 WD ql?{qJ^Z8|ӘQE@ &=ȾT5p9X C[[Nli6Scv}R^qm$屝*S~s͏2Pq\1F5[ϏE@PJ}G_+dn9đO](JtT ⑻xEܶ'v ?c}~9?$ąFĬ䵖#|o$T"(" _%!u.U#Dd %X 3l(H9 _S;vq^HիUbku׮] N>q ʥKV6_KjHZ("p OQ+Ub "Vz(QN} " [x1\Lx7;‡^}3f0.QDtVPc$;BiHhbq %ۊa0x 9<N9lN~Ez@޽{z衇LSy#qk("p!ɽҎ8GIJhD5XIjo%'MmMO8afq.3##f2A\O6K ֭ -G~7o77bF[ٳM{J2?}j(Mpy\ %XA:VpzYy.mDl| ʣ$Opxn~pavh摃\L2<\6k,C/E@PE Mi}-K'l[/InP9Xvg8j@ԶP9t#V/3@K9\BVJ>Uq_ҞyW?"(7_{-KʋmuAr`2r1 &\Btp۵^^h#XՏ"(A a;޻ !v^fl?!OAJvmxv$K"("p#speq6`[%BIno48iQPE@P2"(LFPĿ P,CظqW {jTEF Zrx"Pƍ'P$cq9 ~\E\"R/#3eE@P _}QV X#v[ҡ"&zc&hٳvA׮ ֲI("p `!FeI̧0ȩ@i@Ժ́T޽;fNsM,XO- }ڴi}Cɓ'tFϮTE@ 6w ^(B#v>CImD_{wm'L\F;:D[n5dFjܧ>)lI(8(SP̸E&q^(| EvY&aSs08 2NNDFHJٳ6nHg.R //OҺu];Ns{Ģ."(("D%c 4tNR,4.eICGvrFB,+իfy $kǎfm6f79LN>M^Zj͛7ϐ^~@6oln/|.4l $sT("p=#D⸟_؜ΧW-{Cg$H2B7ۗ/㦸 ,;׎] pCedaь3̲xIdq&wy'-]onʍ<;Esɒ%63f RE@P!cʙW5l8B VZGА/r6qe2>eLj[@MMʹl2jmm[nlme B0%^҈#켣VR5$!^gn r!fsԢ4]!},ìV_%6i$S?"(@ [JdRb z>k2QF!|U/ɤopSG8؇bŊ"cEŋÖ#O?7B-#h^PEF  n90D<`ClKd,_p%>%:l~JF[r $F=qᒲ79\ E@P;0Pq:|"+*pdW"*1ړc*q E@P.9@[ƞVrmջ%PUuCNU+j洨("( 2y,f _%iV+a̓aW"("\G' i%T=Xp5WΧUĶ&)~-7A@ؤ{LS X9[/`I ɤc^5&1~E(gիWi̙!K.{˗/ӧ5yMOHx! 
O"(GT؏ N}2G%dJ|$IZNTrvLn]J|We#3twƾmf[Xi;[۳geĉ˫=6[L)KPE@HC3so:Uj .^`bF4E"Yr 8q͛g HuT >f<%L}c0o\G?Շ"(@;e;ei7Q?m _\DMs6V($jf3g{f+\2TmԩS~KK,1$>"`WEBR v^Pɤ\*mK`I@U;OhU'ݘdQ0 T>w\C,Dw{Y T2'eKɓ'-oyKL,ϟO&M7gYuE@Pn>VVdy%m>3#X-g#p_\6LLH w̯B_nQ=E@P0ÒQK8@`}v=O&!Kۃ%FHPRΗ}!boڋ@1n3@9a&3wQ( >1S_ ~O|K/6 e[ E@Pn>1>3xtGD`-8=ðؾ|V:KW[lIX+箻i\ Y4|Ɯ~yP͓x4UE@PRpI b֋<$4{_J,q`+*W $l@2h4G̊=Cի_:_EaM E@PŠDXw8N_OY!$RVU, *iGmD@͛ݜbBWٌ+"L$)!P]T' . EbחT4@#dJb84("\񸑅/Rn'C}ܶ GpO.A PӐ_""B*>4GSE@P#HiIu3\ 2\P@IDAT,ijCVQmް(8z24E@P31$iO=0H8IHI:Ir5i(  ĶGYf]vR1S2/|1;.VؤZϹƤ("CI<*dIV:6`$(zľ 8/>ЖxvNE:HOR\B/E@PCI;4I.|6>ؗ #8Dnw\rz",i^ &}W8ٳ& ͘N:eE iڴit9vZ|g[|)q|rƕ+W*rF"(7<^blW.5r9-2m,`zD$赊+4XOxޅ n:4٬}fp^lX c̯_~e 7nh`SSS.QU>eo6+<E@Pf?2iύn( rm/۟1֑6v38:Wjڲ*2PE@Pf<lHǩ*-f݃U0͖Q22$V@uqi \'O6{ { :B\g GNkKEm\Xg̘aHWX TҿC3Xճ (t]PE@N1ǔ. ֒UY( VCv__]/ӧeM@ Yj˘W3ge<썲*B/}^_~k_3YiNPE`l23DHYRە\!r[|(c7 AO%zdhGDO|fΜiX+i_ӧbmQUyUo !"("<r)#lˀmur6"/J =3Q%6GxR![2S `yڸqs-J{*"(IDc{؟$OrgsKdFZ]/YBA9؉ x6>Eo\6Y!8GZw˵jW*"\ȭrdG #$/ikO/+%: NjL\DNGC.uE@PE:Ex3Gc""LaնjnJ5U?۷E@PE` +lYS֖#V x2/i=~Q4mI*"(xD aڡOfۤ嫱5~C VZ|@ :V/zƺn9qW:^7tq~E@P 㽤5h"?A|@d!*c#fc&@rr0A{{{ ©jG*"XT2WCJ+B3>#̮ww%E>'Aک(k}@.pS+VI}EΫ0vqK/DOą3P9X?o7};] `TPE`\#MfJ8@`>;eZi,頤S\.op;`ץ@jWJA~F̈́SO8A;w4͎vyCpWn˜7R#~.H,@3"(8V9N >>"2߈Y" %X%n%PX?Vq W &ڵkOn,za3f0sf;ÔqۜЉ=~n̙377裏7\˻T E@P 9vh%XA20o]U , neZC\)/6z a!|UE@PJ_}Č-+ I(|M+a^sBa5i$_|0;=O 1R+Oi{0 /=b ya_jiiɳKKPEG@xJ .~F3JvB:_v>ķ >x`4t@,vݛ'lt2$1Y"m`o?o aeeF("(@ǭ</DnV|:y\;!vIԓiD bm޼Ds1u̢lqF8p4:w%q5U("DK uUl;Pe0ON.FEi(3 TeK%xH d8tP<3}v3[ 2K=~8a;{9Zp!XeGlÇӴc9s&-]ONsͣ ("p#P_I0IOfۣ$G(u (ꡜM]N2i0mۇyDFSc9a& W6 ā3,YbٺѣG駟xzGh˖-`Nm"(P*{ %nշ%Ipn:qYN>=XLJlñȃP 6H͌3_J++K[6m"[33LhOQci#0CC+藎~-*"( P_쇯]#>iiuG%`AT HulR&φ ]S4at`y0ɤ%JUdRE@ xLWyb#P㓗 Xv@@/hrdALuZľ&,Mz`5PěX"(;)_I wfJ/X/-+u$'S ba>P3E ^'N|Ͽ'O)\Lַ3'ŋ校rzYYm|KqD0Qc{;M:dR"(@2p0ĵ\"tO5$ E>V: 0y7/]gΝ35`Ү]tʕt ?.-b^޽͙s]x/|pĆD\&Y d{@/E@PE &XEc~GCdfIrWl9t%Kp,Az BΧ#n [/K苔r,` @nY̐&qX@,-  RU>.my[Q-LJ|.@z /Л?ߓɨIjY劀"(7eЁ$.\RFZ]/tˌU!#A8EIpFďG%o13<0@C3Kb Z{|Se,z^mkK n^4O;H +lǸ^VvCߍB!ѿ"(@Ih$Or >m6tb42 9%d'AF55A1@Ȇ^_zfϘIs 
.,T֨v(j  {z$~.\HM<?L-ٻo_HY'fꙌ*t5("0pTMTҹe `E&}uDK[:| sZR+0fq9Ě >6gO=M ,'zʜyeo?LWYU,X.gV <~2A$#\&BVE@$4zwvgbH8l_^qmTt|L0SqO|gGw(,É 3VndHX;52"NzvUJ`X 2/K-2rza*" d44(2 %Xe#QrYjb_L 7 _^,~_|ӧիxP("Pkx,)w+!\bD, WE[>TՅ"(??覑E0.2YYy>h%A$,T:VBBc+8糩x WKВ90|K,,IIӑv֖)"\d%nZUaeмYmXj:"tE򢦱,/\9/Qu\#pr#Ըx p +1KPE@d\Tvuy >" E/(JI=;X;oy؊}QL# L;JjU4.mP Ll׬-6|RgCK 3`R>X5׉k w}сh.IpwV 0{9'N0LNsFr]>;E@P+\`0ܫp4.t16mu/D^z%3d_nE;Ii:ÛOP"(1<ɽC0&.7qaٸ:PPZN%*ÜK&aVj4{"Ҁ!0n: ~Y*5>Zpi[ɓ'iEsC6w%r̤oGGRM*6"(76j7IN>';zjh_F1W3~ !. q ]vQ+̰!0sӧ?iWBC`s1-C.'/znL2KdX 9 D1$^.&f֯_OO>Wb-f~7ӄ}v7o^͖#*"$4v>mzn9<@0l|I>c;aRRW_5!e舞<DB@1ذ~%V&†ߥ|3/a_Z›&՟"(" Afb#u%i(*1LPq.G]<),bf Ku#΄ jBzz{h ߏC\կ~< |nlE@PF Z8AiK6A DJm]}~ rAZEu {g+u:vIsrCyJ HHG$^" BB!'!^$$&8S<=|>]};q9_tE9e]y?., ___ebַz_>+`@ aV3yƨdi;ĀE6ƹ[ o߶̀ue8b{N6Z9@ p|}"1`Ӫ2 3h5<(Δl6gs&9\u"O C@9 r?k6Auw^~@ߗdnR>[N]_mDks|?X@ @ Vi/se[--宧=LS @ &Ҁ7o}9j1Y:`5gK~[_}ϓ-| @ ]M➿<1@'KU'?XG(|#YˏHrpb#|h @ @ ACmF@gW϶ʳ7I ت6o5kz-KmZ닺}9?>t̙7r@ mH[,ޔе>f4g!̕>u(F42Nmd^}{_tm|'W@ @_-k{@`u5lO@IV 8d-~k˩v/p*4\S+O=yhK믿>ߡ_x< |޺;X v /|KߟOoC=믿8uw]wu7|sop\F@ Ç@jOݼZ3,fY{\-'縴iW^}O?IY?f1nc&TNC(=ug.~'^[M~äw6܁@ o6 Tk 6PYY]İres?ȝ#>i jذ/8gQ+"=bQb3mFEQ#@0! -5jXZ)[OOM[o03}0Ppkg?9Tj@ۗY:'lb@ C} S7)qOiXIFmoIO ,;8s륗^o?T?W\q>`Q;.Kx@  CǏVS31`q}7n|ᤡ;UX:}qW_؜GsN6A*JOW_}3-Kx@ Cid^Pq&Mht<_=3yqa__r^ |e~_r@w1wQ@ 8tj˓rCr6[rՀ@?6WKOzl;d{ .wj  ߗixC QAĉ/?J\zsm,| @ \d̆}kZ9ֿ",5Is%}/:)1[Chz=/cX[n)귩Ԑ"[~11XO7;p|gלKC_h^W`@ @+3/}vDXKm.j[FLMKgWk[;Fd D }D_4ؙ})}\od1ɀFl|HnNB>}Rm0w5ٶџ`]D@ #!?}t2s6 RM42@ 3~4;t6__8 {"WѦr6Y_ׄԗ >@ &~F,jglK:l5⠪au3XnI`gՕOw6l0=q ԃ-@ I~EhˬR:QJVӕRat:Z1zrMɷIA3 8~2ɷ4X)jU_JU_gU`@ ʫav𻜗[TJVչ@ë!⏏K: ZCړ5zjY9_.~w]vYcl_OmA7EZ+X-՚U ꪫ wt(=yO裏f_ 8rf^{mD~u]tQVS9<@wׯzTyo}[ݫ Rw|:@ ~e5,vP\}w(ߩ> ]س&Ѕ{l5P|9-ɓ'TWEAۅH{ڿ}Sww;{oqjڈ @ x 33hnnl;XM\~9,Š%'%y甁Fd~뮻.Z|v u՗ް_W`ŝ.- 78\s5y>-V @ 0@G1/ڒ]y%iUJ6Ԝ+zsJb|>[Q an:@ stz׻u1W>=yG? 
OTg}~d J[fc@ gUVLX1sMsdO/jCٷR[/&r4cE ug{`/~1?_ reȹ[W/3}pM/bctM#gy&\7`}os{C+@ B iyIdy!Y9:`$US'#*.mު[?"8W HE]wݕq׆Agx৻lW[bb_߮4`Ԉhݐ]>UAA@ @X"Pδ34$bJ:>-UY~Ȏk=W9mqs+tQm^lR_LA@ hv`O*;_ qC^)4`)B Ӹo,&ٜgװ24 n/ +ݡCxnd"_ QA߹ȵyC_)nEK*)1$s)_VS4Lm*/շBV}yB8N:ٽk-}W_~9[-ît0 @@ V j;o2;<}`rMi`O¹꫻|#9Az駻￿}o8ֆ*#˫O.o}{|K/uzV#Vl|[;ja@ 8hi=}loH_tioF*iH@N>J/=O.z{wjc7?#NC϶שSYG<`S5'u]}??s饗S9kq:@ E`gHk8MY_/(Kt!-^}]k6+O>dǝ*-M?pǝ;/(Y hdI|;7/u/Uy!j|46`O:;?|t7@ @;3X1J` aR\Mj+nE5aaG~8a֍7ؽkh){sx%|wv7|jԆU{7A?菺??Xa~ȱ@ W{+~DHP֔u |!M:hKW~3`Wc3<=ݯʯ"[2>6@  !]{,0?sHGKyߔ#B %Ӕr[?3:]Q P~gW^yN"}Ct7'_ 9xǘ<+#]8@ {^^ogdCY?mdo5TĞs-}e-Ta6N'zVy> }>{{ߟuRY)vK}18uҗ=ܓ=xK.碻M_P'C \ԉ@ ㍀pyEK'ߺW!_>:ͺk(NLMc׉J'SSvߪB $ ({]*,XxcG?okW\>S_ U??B ?xrM]A ߱ȿ˅,?X@ G C[ަ~Ykieb_ %UQ>cp(-dи˳SKGk^ԏu`ࡿ>teK_0w.7jU#@ O ~Q{)/&ZqCq%Z!]^S.o9V-JmX[6fί:A@  3iyDЎ_cc^=k+Z^'8K`/*?ə zJƠ@ @ pOrfYJy+{PdcǕ|)Y0O'jHO,@ @ pdQe6GCqmqڄm/ 5l}i:@ @ pXL>ټrKMC.mo@ *j~߬ r2jgK C%jm%o2NFz䒎?SbvJ'x{ݝ8q"f7IǞo~v_4ʷXvt^07c}&g*/U'@ _~_ 8q?'t((TѤ|KMN.wA5(S3$ vlz׻7{s[^x"ϗ^K_w Xs/ad h@ @`% LYzKœJ#o>Z,5Hq-;|}k>c6zw??};wn ?Oh\kuwvMBUgɿ=0ͽtVg?|ǻ__wdn @ 8>1yg.KYs1闡(3[XK$o˜FjW4#[{?R6m/3 ˴կ~=GqO/8Y[~A[.+zA1}/>jOӫhxC @ 8zؽaT\J:*Q4kKd_::e^:>7z衧0uGBW 3d5onnJ#ffK/Ro|L>G;܉}SoҚٟY;;ݭ^s=I @ 8,<,7ik(Q%k뀵I{D%QldSO?4ۿ_K]gd@+W~ʙz?}<ՕWc? 
Yc}My{<~tݫ~z ~?ZI/:7jx\pA{{n,u61bi=0=B%r?o,ztZzkeT<&MNۭ,^DUL<{'?PoϽ<}vpe/=w"cP>p(S{iȽvEs!f{W3c{<Hw_aj/$6d|o^r@K$qj=8xh{:p|3K}}=>Cw4;xҬ]5G# ٍ:;!wl~I*Cdx]w˟ԧL]:7 _W#]Z_%M|GiP_͐+|7%>|,Gq< R.zÞm.م"Og]$I#x>Wz#w\sM'k_Z+s>BIoߘzU?w~GOOݯگu}}Cz\$[:ձ5{3|:/yV\Α'RBoy][0 _zs>ڥB ng⿻EWa`ϯ[*_߭ǥI [Z0]5^qS(M?˽qyS c[2YIAӎ<;>QyGyOOgMu!G%}Ov[x-',K BC%C^Fwg'}l{t\ӡzkGS?V/_뵤W}MF;Y1i8]khȺLrqGCx\E{pU|Y~^PGZұIG/LF-kkl\NyTK=M:`J(}e6vIm[OغR^wO9K|h$a]SSi/MiM%mq$It?tTwxSbOXJ7JN8qv 'V5DWvOZ[\c_Z+$9y_+[ #9I6+&w;_ȝM'liP"29a~y,̧<{o&:V͓y\Gu Z E3d fu1K~ij]H΅=pɲU}_aZԄk[^W<_dǂ =Xֱ|3«@W1..5鸆k W5-7-2u MK6ii\bW%P~yw*՗ -mC{i|&d˃4ɴLL-.}\e*e%ϤS{L ]He>ӒՋ/Qts%C%[-AK RǤ4 ˱泴)=V/Ex]$ɲ @ [[yd{hP6 @iE|NjZKQ{j|J5T^-/Y瀍%ʪх]ɺC$dW<9bFzӁ낝籾\]xՓN2%y!M~5C<ksbe5Z!MrqPK,C=Ϟ*tP{0(IOr'ܢ\)j#曆k4`#E:=P>`e=rq0X%|v [ O,X eyjuGRjz^2#jq赦bȉMeUZxqJq=q:WS?W[I>m~6,s&N3um,ۛJuT 6dtæfcV:N=S~Ky.}c_1P|2Ky,c㬿xDo)|p%_t6[C|r[FǒMTl4/*})?ڀ$vxP@q 2D$ɥ8hW?b$jaS=t:C}$6K,b>!MSkE\]ZmԛaQvׄc,R#s/#[DYW-啌꒘et,omy%,o*@zA9NJq>?ȉ&S)ۑ9X\8bB;RG^ oʉl%/: MV~v)٣O^ X(m\.%߽":x(RVydl@ l탥yɖ|V[O]ґUlP_hznhDC\jP' 􌔵gҡTS=Xr{J:j:djgcd(-~%^CYbIV'~LP=ժT [8:%?O)\Et~}qĩ֘>wq ԏA}\%_EekyXg}-OP\-Kj3h.gY&sƩ9[OqAꓵځlj8ŋ*jzyʯz-J/6(%ju=U^̂r*I'=k;Mҩ'dyձXE ea>!@ F}/d?)xK;\zxy_9_6l4X 8ĠUdS"م/KQS<ޤ.٦kT~P_ZZz>1+0],]l{ݎ0`}^XW=U zk e,yD5\IQeu %ohZ9W7%!ɖ'W԰vdV.m@ lA[vd +8y[![Ƨy.]H_k;`J{+K)NzAxkC|\,:HN<]^V٘@ m#P'}MXy>*=d^I侪k5Ҳz˗| X,zBd[@ ;N=|Y2z\P|H.~ɮ^/o5~ͱ8[r5&wwMeq`b>eR/j-z짞yj\Q^c,RUh x}i+( _HO!jsF׊{_\kճ~^,l}-og6+~J\9QY^Z>'/-6N>+Q̾*Y%xj9mdՂ$/ūll}>ֿ:`ф.-$(6оeaHP{\ t=>IG/%S>PlC6Ok"np3P+{<%{?lѳZoeekXqӟd)v_NKCq_ӮP5^ʒ Wsfezd:Ot:WPch5W8U=pR/²$ck'XbWK25C׷jlmlrSsƩ6^KjR!DvV⥃Z/%?%]^H"o]/QJB&ē rYJNЩ)U /֐Kŧ@ @`*/5oi,;$<ҋ*ҋd(FWzX$ C6S94x K<_k* V(e_X򕿷a`}@ vg}}Gr̂z+[[.ecY-ūt6}6L$ ѨSXbї@dyNy58I&t%׼б<`^UM^cOYY-uV<1VE_V_ %OhRCoė(qfWIxl:QaF>x, /B@ @p [ߕ[KgK\:oI'*dKlo0hl o xQk{[I.|-e i>@ @0#s۫YY(q}K>-/ײvDlYѩ@_ج]{ zWqI/Y9$N\/b983g}-_qȬ!Rb_C`}ZZ] O~,]-yyq=B7tqm+A}j:Xcc)B::d?&?}l0:SQKzK=_ѱo!k @ p< ^oe !*^zɖZy_fie!Rd8h-X]CkCֲ1R[mG^S1WQgve?JcJQ^CYRt,_o]n;2]~%YaϯYCgv\qlRZ)q>Y<5Nx㣥8mRZ=S=gjq~^G_6+zol&}mN5]v^b 
q@[:+xRNc(V*t@ }knk󼗭OlBSV^~!C{^WV9^I'_%oO_z^z<[YYX披g0 KcMdq Kν6TZn-ڕǥML_;神ॗN}J_%>EiyV'^T}XY|>V/n jT<\acY<[>,"ZuNueKK\ɧZ||:oa<kՒ} y%>>Og TJT_Sk:nC>9Z{qs6rLa^uڗ(a=bya^WZt%Xǔdt,{9s0iv"{Vgy[%Y:O#T>>|do/Wg}-_-5j}-_/MP\^ӗY_|%Zkug7%>j~s5,?.ԳqKWW\Z^vO=>6dYhv+[^%)_OkoPȠsCkΚɴe}jڷv_t. Z+]~2Z-,]Hs7d/2:nvx ͍PWCvy8jKXz>J=|J~J_b:th[+˴u^.t*>-2y\ȩq-=Up] 3Av} ;L=qBe^ezK>c⫘*m}~%T])Ng@ %4?or{+jҗh_ngl҅v7]}\B@ ϵ_^_җtcԳ#Ѵd15|bhwnO _.Ę| @ p@=||e9jz| }3jX[gȿgHc|Q@  l\ͿS<->&ehJ4Vj%g@ @ X `ago.[+n=Ĕ1~4wKmS@  ([Ϧrk޹l߭mdL S뎉$ 9@ 8n0ؽr<~q>O|V缎蔓'_K[l,U!SR:b+ya=؇bDʡ{ZΒl)'*%FQ1%>|b46 X5?1֗9؜@ ojg[rlcoaziŬ]4 ioezŋ!9x^g}-_cue5yYqصE:+mCfE2)Q얗 gc+]Kњ^nlue_XOzŋ8m}\{ø6rF좧@ @"g[y'x؇]z\(@ @mO<]ԘQ"dCo9h+Ijqv-%NyWZ춖w&l?\sL|}^Ղʧk=*[8ɻZEj\ '[RXY戳yX"N5|?*%Οؼ%8_'ôJ=r/r .@ @ DH Uj!8F@ o'0U89MWzMeܦRζ#n;cLָeRζwzb]Jdv׿ƼJܘs:jGL]1Nčk0 _@ @ @ @ @ @ @ @ @ @`V?I~u_]IENDB`testthat/src/0000755000176200001440000000000015130237654012707 5ustar liggesuserstestthat/src/Makevars.win0000644000176200001440000000006412661230133015166 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include -DCOMPILING_TESTTHAT testthat/src/test-example.cpp0000644000176200001440000000216012661230133016011 0ustar liggesusers/* * This file uses the Catch unit testing library, alongside * testthat's simple bindings, to test a C++ function. * * For your own packages, ensure that your test files are * placed within the `src/` folder, and that you include * `LinkingTo: testthat` within your DESCRIPTION file. */ // All test files should include the // header file. #include // Normally this would be a function from your package's // compiled library -- you might instead just include a header // file providing the definition, and let R CMD INSTALL // handle building and linking. int twoPlusTwo() { return 2 + 2; } // Initialize a unit test context. This is similar to how you // might begin an R test file with 'context()', expect the // associated context should be wrapped in braced. 
context("Sample unit tests") { // The format for specifying tests is similar to that of // testthat's R functions. Use 'test_that()' to define a // unit test, and use 'expect_true()' and 'expect_false()' // to test the desired conditions. test_that("two plus two equals four") { expect_true(twoPlusTwo() == 4); } } testthat/src/test-runner.cpp0000644000176200001440000000036712661230133015676 0ustar liggesusers/* * Please do not edit this file -- it ensures that your package will export a * 'run_testthat_tests()' C routine that can be used to run the Catch unit tests * available in your package. */ #define TESTTHAT_TEST_RUNNER #include testthat/src/init.c0000644000176200001440000000065215054053615014017 0ustar liggesusers#include #include #include // for NULL #include /* .Call calls */ extern SEXP run_testthat_tests(SEXP); static const R_CallMethodDef CallEntries[] = { {"run_testthat_tests", (DL_FUNC) &run_testthat_tests, 1}, {NULL, NULL, 0} }; void R_init_testthat(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } testthat/src/test-catch.cpp0000644000176200001440000000173614164710003015447 0ustar liggesusers#include #include #include #include namespace { void ouch() { std::string message = "logic"; throw std::logic_error(message); } } // anonymous namespace context("Catch: Example Unit Test") { test_that("4 + 4 == 8") { expect_true((4 + 4) == 8); } } context("Catch: A second context") { test_that("2 - 2 == 0") { expect_true((2 - 2) == 0); } test_that("-1 is negative") { expect_true((-1 < 0)); } } context("Catch: Respect 'src/Makevars'") { bool compiling_testthat; #ifdef COMPILING_TESTTHAT compiling_testthat = true; #else compiling_testthat = false; #endif test_that("COMPILING_TESTTHAT is inherited from 'src/Makevars'") { expect_true(compiling_testthat); } } context("Catch: Exception handling") { test_that("we can use Catch to test for exceptions") { expect_error(ouch()); expect_error_as(ouch(), std::exception); 
expect_error_as(ouch(), std::logic_error); } } testthat/src/Makevars0000644000176200001440000000006412661230133014372 0ustar liggesusersPKG_CPPFLAGS=-I../inst/include -DCOMPILING_TESTTHAT testthat/NAMESPACE0000644000176200001440000001355515127561732013353 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(as.data.frame,testthat_results) S3method(as.expectation,default) S3method(as.expectation,error) S3method(as.expectation,expectation) S3method(as.expectation,skip) S3method(as.expectation,warning) S3method(compare,POSIXt) S3method(compare,character) S3method(compare,default) S3method(compare,numeric) S3method(format,expectation) S3method(format,expectation_success) S3method(format,mismatch_character) S3method(format,mismatch_numeric) S3method(output_replay,character) S3method(output_replay,error) S3method(output_replay,message) S3method(output_replay,recordedplot) S3method(output_replay,source) S3method(output_replay,warning) S3method(print,comparison) S3method(print,expectation) S3method(print,mismatch_character) S3method(print,mismatch_numeric) S3method(print,testthat_hint) S3method(print,testthat_results) S3method(snapshot_replay,character) S3method(snapshot_replay,condition) S3method(snapshot_replay,source) S3method(test_mock_method,integer) S3method(testthat_print,default) export("%>%") export(CheckReporter) export(CompactProgressReporter) export(DebugReporter) export(FailReporter) export(JunitReporter) export(ListReporter) export(LlmReporter) export(LocationReporter) export(MinimalReporter) export(MultiReporter) export(ParallelProgressReporter) export(ProgressReporter) export(RStudioReporter) export(Reporter) export(SilentReporter) export(SlowReporter) export(StopReporter) export(SummaryReporter) export(TapReporter) export(TeamcityReporter) export(announce_snapshot_file) export(auto_test) export(auto_test_package) export(capture_condition) export(capture_error) export(capture_expectation) export(capture_message) export(capture_messages) 
export(capture_output) export(capture_output_lines) export(capture_warning) export(capture_warnings) export(check_reporter) export(compare) export(compare_file_binary) export(compare_file_text) export(context) export(context_start_file) export(default_compact_reporter) export(default_reporter) export(describe) export(edition_get) export(equals) export(equals_reference) export(evaluate_promise) export(exp_signal) export(expect) export(expect_all_equal) export(expect_all_false) export(expect_all_true) export(expect_condition) export(expect_contains) export(expect_cpp_tests_pass) export(expect_disjoint) export(expect_equal) export(expect_equal_to_reference) export(expect_equivalent) export(expect_error) export(expect_failure) export(expect_false) export(expect_gt) export(expect_gte) export(expect_identical) export(expect_in) export(expect_invisible) export(expect_is) export(expect_known_hash) export(expect_known_output) export(expect_known_value) export(expect_length) export(expect_less_than) export(expect_lt) export(expect_lte) export(expect_mapequal) export(expect_match) export(expect_message) export(expect_more_than) export(expect_named) export(expect_no_condition) export(expect_no_error) export(expect_no_failure) export(expect_no_match) export(expect_no_message) export(expect_no_success) export(expect_no_warning) export(expect_null) export(expect_output) export(expect_output_file) export(expect_r6_class) export(expect_reference) export(expect_s3_class) export(expect_s4_class) export(expect_s7_class) export(expect_setequal) export(expect_shape) export(expect_silent) export(expect_snapshot) export(expect_snapshot_error) export(expect_snapshot_failure) export(expect_snapshot_file) export(expect_snapshot_output) export(expect_snapshot_value) export(expect_snapshot_warning) export(expect_success) export(expect_that) export(expect_true) export(expect_type) export(expect_vector) export(expect_visible) export(expect_warning) export(expectation) export(extract_test) 
export(fail) export(find_test_scripts) export(get_reporter) export(gives_warning) export(has_names) export(is.expectation) export(is_a) export(is_checking) export(is_equivalent_to) export(is_identical_to) export(is_informative_error) export(is_less_than) export(is_more_than) export(is_parallel) export(is_snapshot) export(is_testing) export(it) export(local_edition) export(local_mock) export(local_mocked_bindings) export(local_mocked_r6_class) export(local_mocked_s3_method) export(local_mocked_s4_method) export(local_on_cran) export(local_reproducible_output) export(local_snapshotter) export(local_test_context) export(local_test_directory) export(make_expectation) export(mock_output_sequence) export(new_expectation) export(not) export(pass) export(prints_text) export(quasi_label) export(run_cpp_tests) export(set_max_fails) export(set_reporter) export(set_state_inspector) export(setup) export(show_failure) export(shows_message) export(simulate_test_env) export(skip) export(skip_if) export(skip_if_not) export(skip_if_not_installed) export(skip_if_offline) export(skip_if_translated) export(skip_on_appveyor) export(skip_on_bioc) export(skip_on_ci) export(skip_on_covr) export(skip_on_cran) export(skip_on_os) export(skip_on_travis) export(skip_unless_r) export(snapshot_accept) export(snapshot_download_gh) export(snapshot_reject) export(snapshot_review) export(source_dir) export(source_file) export(source_test_helpers) export(source_test_setup) export(source_test_teardown) export(succeed) export(takes_less_than) export(teardown) export(teardown_env) export(test_check) export(test_dir) export(test_env) export(test_example) export(test_examples) export(test_file) export(test_local) export(test_package) export(test_path) export(test_rd) export(test_that) export(testing_package) export(testthat_example) export(testthat_examples) export(testthat_print) export(testthat_tolerance) export(throws_error) export(try_again) export(use_catch) export(verify_output) export(watch) 
export(with_mock) export(with_mocked_bindings) export(with_reporter) import(rlang) importFrom(R6,R6Class) importFrom(brio,readLines) importFrom(brio,writeLines) importFrom(lifecycle,deprecated) importFrom(magrittr,"%>%") importFrom(methods,new) useDynLib(testthat, .registration = TRUE) testthat/LICENSE0000644000176200001440000000005615040747537013134 0ustar liggesusersYEAR: 2023 COPYRIGHT HOLDER: testthat authors testthat/NEWS.md0000644000176200001440000024503215130037752013221 0ustar liggesusers# testthat 3.3.2 * testthat now emits OpenTelemetry traces for tests when tracing is enabled. Requires the otel and otelsdk packages (#2282). * `default_parallel_reporter()` is no longer exported; use `default_reporter(parallel = TRUE)` instead (#2305). * `expect_snapshot()` once again reports the original error class for base errors, rather than `rlang_error` (#2286). * `expect_snapshot_file()` once again works with shinytest2 on CI (#2293, #2288). * `expect_snapshot_file()` correctly reports file name if duplicated (@MichaelChirico, #2296). * `expect_success()` and `expect_failure()` now always report the observed number of successes and failures (#2297). * `LlmReporter()` is a new reporter designed for use by LLM coding agents. It's used automatically inside Claude Code, Cursor, and Gemini CLI, and you can set `AGENT=1` to use with any coding agent (#2287). * `local_mocked_s3_method()` and `local_mocked_s4_method()` can now mock methods that don't already exist, and can use `definition = NULL` to temporarily remove a method. `local_mocked_s4_method()` now also works when the generic is defined in another package (#2302). * `local_snapshotter()` restores `snap_dir` to be the first argument for compatibility with devtools 2.4.6 (#2309). * `test_dir()` no longer runs tests in parallel if only a single file is being tested (#2305). # testthat 3.3.1 * `expect_snapshot()` and friends only emit the `snapshot_download_gh()` hint when running in a job named "R-CMD-check" (#2300). 
# testthat 3.3.0 ## Lifecycle changes * testthat now requires R 4.1. * `expect_snapshot(binary)`, soft deprecated in 3.0.3 (2021-06-16), is now fully deprecated. * `is_null()`/`matches()`, deprecated in 2.0.0 (2017-12-19), and `is_true()`/`is_false()`, deprecated in 2.1.0 (2019-04-23), have been removed (#2109). * `local_mock()` and `with_mock()`, deprecated in 3.0.0 (2020-10-31), are now defunct. They technique that made them work is no longer permitted in R 4.5.0. * `test_files(wrap)`, deprecated in 3.0.0 (2020-10-31) has now been removed. ## Expectations and tests * All `expect_` functions have had their failure messages rewritten. They now all state what was expected, what was actually received, and, if possible, clearly illustrate the difference (#2142). They also consistently check that their inputs are the correct type (#1754). They consistently return the value of the first argument, regardless of whether the expectation succeeds or fails (the only exception is `expect_message()` and friends which return the condition). This shouldn't affect existing tests, but will make failures clearer when you chain together multiple expectations (#2246). * It's now much easier to create custom expectations, thanks to an overhauled `vignette("custom-expectations")` (#2113, #2132, #2072), a new `pass()` function to use in place of `succeed()` (#2113), and revisions to how `expectation()` works (#2125). `expect_no_failures()` and `expect_no_successes()` are now deprecated as now `expect_success()` also tests for the absence of failures and `expect_failure()` tests for the absence of successes. * Tests (i.e. `test_that()`, `describe()`, and `it()` calls) can now be arbitrarily nested with a shared stack of descriptions so that the failures can show the full path through all subtests. Skips in subtests are now correctly scoped (#2007), and each subtest and will skip if and only if it and its subtests don't contain any expectations. 
* On CRAN, `test_that()` will automatically skips if a package is not installed (#1585). Practically, this means that you no longer need to check that suggested packages are installed. (We already didn't check the installation of suggested packages in the tidyverse because we think it has limited payoff, but other styles advise differently.) * When running a test interactively, testthat now reports the number of successes and failures. If you're using nested tests, the results should be more useful and you will no longer see duplicated failures (#2063, #2188). * New `expect_all_equal()`, `expect_all_true()`, and `expect_all_false()` check that every element of a vector has the same value (#1836, #2235), giving better error messages than `expect_true(all(...))`. * New `expect_disjoint()` expects values to to be absent (@stibu81, #1851). * New `expect_r6_class()` expects an R6 objects (#2030). * New `expect_shape()` expects a specific shape (i.e., `nrow()`, `ncol()`, or `dim()`) (#1423, @michaelchirico). ## Other new features * New `extract_test()` function to extract a reprex from a failing expectation. tests run in `R CMD check` will use this to automatically create a reprex in the `_problems/` directory for each failing expectation. You can turn this behaviour off by setting `TESTTHAT_PROBLEMS=false` (#2263). * New `local_mocked_s3_method()`, `local_mocked_s4_method()`, and `local_mocked_r6_class()` allow you to mock S3 and S4 methods and R6 classes (#1892, #1916) * New `local_on_cran(TRUE)` allows you to simulate how your tests will run on CRAN (#2112). * `test_dir()`, `test_file()`, `test_package()`, `test_check()`, `test_local()`, `source_file()` gain a `shuffle` argument that uses `sample()` to randomly reorder the top-level expressions in each test file (#1942). This random reordering surfaces dependencies between tests and code outside of any test, as well as dependencies between tests. This helps you find and eliminate unintentional dependencies. 
* `try_again()` is now publicised, making it easier to test flaky code. We changed the first argument to represent the number of retries, not tries (#2050). * New `skip_unless_r()` skip tests on unsuitable versions of R. It has a convenient sytnax so you can use, e.g., `skip_unless_r(">= 4.1.0")` to skip tests that require `...names()` (@MichaelChirico, #2022). * New `snapshot_reject()` rejects all modified snapshots by deleting the `.new` variants (#1923), and `snapshot_download_gh()` makes it easy to get snapshots off GitHub and into your local package (#1779). * `expect_snapshot()` and friends will now fail when creating a new snapshot on CI. This is usually a signal that you've forgotten to run it locally before committing (#1461). * New `SlowReporter` makes it easier to find the slowest tests in your package. You can run it with `devtools::test(reporter = "slow")` (#1466). * New `vignette("mocking")` explains mocking in detail (#1265). * New `vignette("challenging-functions")` provides an index to other documentation organised by various challenges (#1265). ## Minor improvements and bug fixes * Interrupting a test now prints the test name. This makes it easier to tell where a very slow test might be hanging (#1464). * Fixed an issue preventing compilation from succeeding due to deprecation / removal of `std::uncaught_exception()` (@kevinushey, #2047). * `expect_lt()`, `expect_gt()`, and friends have a refined display that is more likely to display the correct number of digits and shows you the actual values compared (#2006). They have a better display for non-numeric data (@stibu81, #2268). * `expect_matches()` failures should be a little easier to read (#2135, #2181). * `expect_no_*()` now executes the entire code block, rather than stopping at the first message or warning (#1991). * `expect_named()` now gives more informative errors (#2091). * `expect_s4_class()` now supports unquoting (@stibu81, #2064). 
* `expect_snapshot()` no longer loses the last snapshot if file is missing the final newline (#2092). It's easy to accidentally remove this because there are two trailing new lines in snapshot files and many editors will automatically remove one if you touch the file. * `expect_snapshot()` on reports how to resolve failures once when running inside `R CMD check`. * `expect_snapshot()` no longer skips on CRAN, as that skips the rest of the test. Instead it just returns, neither succeeding nor failing (#1585). * `expect_snapshot()` and `expect_snapshot_file()` hints now include the path to the package, if it's not the current working directory (#1577). * `expect_snapshot()` gives a more informative backtrace when the code inside the snapshot errors (#2277). * `expect_snapshot_file()` now clearly errors if the `path` doesn't exist (#2191), or has been used alreaday (#1592). It now considers `.json` to be a text file (#1593), and shows differences for text files in the console (#1593). * `expect_snapshot_value()` can now handle expressions that generate `-` (#1678) or zero length atomic vectors (#2042). * `expect_vector()` fails, instead of erroring, if `object` is not a vector (@plietar, #2224). * `JunitReporter()` no longer fails with `"no applicable method for xml_add_child"` for warnings outside of tests (#1913). Additionally, warnings now save their backtraces and `JunitReporter()` strips ANSI escapes in more places (#1852, #2032). * `local_edition()` now gives a useful error for bad values (#1547). * `ParallelProgressReporter` now respects `max_failures` (#1162), and test files with syntax errors are no longer silently ignored (#1360). * `test_file(desc = ...)` (i.e. test filtering) no longer loses snapshot results (#2066), works with `it()`, and can now be character vector in order to recursively filter subtests (#2118). * `test_that()` no longer warns about the absence of `{}` since it no longer seems to be necessary. 
* `set_state_inspector()` gains `tolerance` argument and ignores minor FP differences by default (@mcol, #2237). * `skip_on_os()` gains an option `"emscripten"` of the `os` argument to skip tests on Emscripten (@eitsupi, #2103). * `snapshot_accept(test)` now works when the test file name contains `.` (#1669). * `snapshot_review()` includes a reject button and only displays the file navigation and the skip button if there are multiple files to review (#2025). It now passes `...` on to `shiny::runApp()` (#1928). # testthat 3.2.3 * Fixed an issue where `expect_no_error(1)` was failing (#2037). * Fixed an issue where calling `skip()` outside of an active test could cause an unexpected error (@kevinushey, #2039). # testthat 3.2.2 ## New expectations * `expect_s7_class()` tests if an object is an S7 class (#1580). * `expect_no_failure()`, `expect_no_success()` and `expect_snapshot_failure()` provide more options for testing expectations. ## Bug fixes and minor improvements * testthat now requires waldo 0.6.0 or later to access the latest features (#1955). * `expect_condition()` and related functions now include the `class` of the expected condition in the failure message, if provided (#1987). * `expect_error()` and friends now error if you supply `...` but not `pattern` (#1932). They no longer give an uninformative error if they fail inside a magrittr pipe (#1994). * `expect_no_*()` expectations no longer incorrectly emit a passing test result if they in fact fail (#1997). * `expect_setequal()` correctly identifies what is missing where (#1962). * `expect_snapshot()` now strips line breaks in test descriptions (@LDSamson, #1900), and errors when called from a `test_that()` that has an empty description (@kevinushey, #1980). * `expect_true()` and `expect_false()` give better errors if `actual` isn't a vector (#1996). * `expect_visible()` and `expect_invisible()` have clearer failure messages (#1966). 
* `local_reproducible_output()` (used in `test_that()` blocks) now sets `LANGUAGE` to `"C"` instead of `"en"` to disable translations, avoiding warnings on some platforms (#1925). * `skip_if_not_installed()` generates a clearer message that sorts better (@MichaelChirico, #1959). * `with_mock()` and `local_mock()` have been unconditionally deprecated as they will no longer work in future versions of R (#1999). # testthat 3.2.1 * Fix incorrect format string detected by latest R-devel. Fix thanks to Tomas Kalibera. * `expect_snapshot()` handles unexpected errors like errors outside of snapshots, i.e. they terminate the entire test and get a traceback (#1906). * `JunitReporter()` now ensures numeric values are saved in the xml file with `.` as decimal separator. (@maksymiuks, #1660) * `local_mocked_bindings()` can now mock any object, not just functions (#1896). * `skip_if_offline()` now uses `captive.apple.com` by default. This is the hostname that Apple devices use to check that they're online so it should have a higher reliability than `r-project.org` (@jdblischak, #1890). * `test_file(desc = )` will now find `describe()` tests as well as `test_that()` tests (#1903). # testthat 3.2.0 ## Lifecycle changes * `is_informative_error()` and the `wrap` argument to `test_dir()` and friends are now defunct. * `expect_no_error()`, `expect_no_warning()`, `expect_no_message()`, `expect_no_condition()`, `local_mocked_bindings()`, and `with_mocked_bindings()` are now stable, not experimental. ## New features * All packages, regardless of whether or not they use rlang 1.0.0, now use the new snapshot display for errors, warnings, and messages (#1856). This no longer shows the class name, instead focussing on a display that more closely mimics what you'll see interactively, including showing the error call. * testthat uses an improved algorithm for finding the srcref associated with an expectation/error/warning/skip. 
It now looks for the most recent call that has known source and is found inside the `test_that()` call. This generally gives more specific locations than the previous approach and gives much better locations if an error occurs in an exit handler. ## Minor features and bug fixes * Helpers are no longer run twice. * `expect_setequal()` correctly displays results when only one of actual and expected is missing elements (#1835). * `expect_snapshot()` and friends no longer create a temporary file on every invocation. * `expect_snapshot_file()` now generates clickable links to review changes (#1821). * `expect_snapshot_value()` has an improved error if the object can't be safely serialized using the specified `style` (#1771). * `options(rlang_interactive = TRUE)` no longer causes `skip_on_cran()` to not run on CRAN (#1868). * `skip_if_offline()` now errors if you don't have curl installed (#1854). * `StopReporter` gains the ability to suppress praise when a test passes. * `ProgressReporter` now uses a two characters wide skip column in order to have a consistent width when 10 or more tests are skipped in a single file (@mgirlich, #1844). * `test_file()` gains a `desc` argument which allows you to run a single test from a file (#1776). # testthat 3.1.10 * Fix for upcoming R-devel release. * `testthat` now sets the `_R_CHECK_BROWSER_NONINTERACTIVE_` environment variable when running tests. This should ensure that left-over `browser()` statements will trigger an error if encountered while running tests. This functionality is only enabled with R (>= 4.3.0). (#1825) # testthat 3.1.9 * New `expect_contains()` and `expect_in()` that work similarly to `expect_true(all(expected %in% object))` or `expect_true(all(object %in% expected))` but give more informative failure messages (#1346). 
* New `is_snapshot()` returns `TRUE` if code is running inside a snapshot test (#1796) and `is_checking()` returns `TRUE` if test is running inside of `R CMD check` (#1795) * `ProgressReporter` only reports the run time of test files that take longer than 1s, rather than 0.1s. (#1806) and re-displays all failures at the end of the results. Skips are now only shown at the end of reporter summaries, not as tests are run. This makes them less intrusive in interactive tests while still allowing you to verify that the correct tests are skipped (#1801). When using parallel tests, links to failed tests (#1787) and links to accept/review snapshot (#1802) now work. * `set_state_inspector()` allows you to register a function that's called before and after every test, reporting on any differences. This is very useful for detecting if any of your tests have made changes to global state (like options, env vars, or connections) (#1674). This function was inspired by renv's testing infrastructure. * `skip_on_cran()` no longer skips (errors) when run interactively. * `teardown_env()` works in more cases. * testthat no longer truncates tracebacks and uses rlang's default tree display. # testthat 3.1.8 * `expect_snapshot()` differences no longer use quotes. * `expect_error()`, `expect_warning()`, and `expect_message()` now correctly enforce that the condition is of the expected base class (e.g. error, warning, message) even when the `class` argument is used (#1168). * `it()` now calls `local_test_context()` so that it behaves more similarly to `test_that()` (#1731), and is now exported so that you can more easily run BDD tests interactively (#1587) * `skip_on_bioc()` now uses the documented environment variable (`IS_BIOC_BUILD_MACHINE`) (#1712). * `source_file()`, which is used by various parts of the helper and setup/teardown machinery, now reports the file name in the case of errors (#1704). * `test_path()` now works when called within helper files (#1562). 
* New `vignette("special-files")` describes the various special files that testthat uses (#1638). * `with_mocked_bindings()` and `local_mocked_bindings()` now also bind in the imports namespace and can mock S3 methods. These changes make them good substitutes for the deprecated functions `with_mock()` and `local_mock()`, so those older functions now recommend switching to the newer equivalents instead of using the mockr or mockery packages. # testthat 3.1.7 * `expect_setequal()` gives more actionable feedback (#1657). * `expect_snapshot()` no longer elides new lines when run interactively (#1726). * Experimental new `with_mocked_bindings()` and `local_mocked_bindings()` (#1739). # testthat 3.1.6 * The embedded version of Catch no longer uses `sprintf()`. # testthat 3.1.5 * Deprecation warnings are no longer captured by `expect_warning(code, NA)`, `expect_no_warning(code)`, or `expect_silent(code)`. This ensures that they bubble up to the top level so that you can address them (#1680). If you want to assert that code does not throw a deprecation warning, use `expect_no_condition(code(), class = "lifecycle_warning_deprecation")`. * New experimental `expect_no_error()`, `expect_no_warning()`, `expect_no_message()`, and `expect_no_condition()` for asserting the code runs without an error, warning, message, or condition (#1679). * Fixed a warning in R >=4.2.0 on Windows that occurred when using the C++ testing infrastructure that testthat provides (#1672). * Fixed an issue that could prevent compilation of Catch unit tests with LLVM 15. In the interim, packages needing a local workaround can set `PKG_CPPFLAGS = -DCATCH_CONFIG_CPP11_NO_SHUFFLE` in their `src/Makevars`. (@kevinushey, #1687) * Improve way `capture_output()` handles encoding thanks to suggestion from Kurt Hornik (#1693). This means that snapshots using UTF-8 encoded text on windows work once again. 
* `local_reproducible_output()` will no longer attempt to set the local language when `LANG='C'` is set or an R version is used that was not compiled with natural language support (NLS), which would previously emit non-test-related warnings during testing (@dgkf, #1662; @heavywatal, #1689). * `test_check()` now suppresses hyperlinks since they'll take you to the wrong places (#1648). * New `set_max_fails()` helper to make it easier to set the maximum number of failures before stopping the test suite. And the advice to set to Inf is now clickable (#1628). * You can now configure the behaviour of the implicit `devtools::load_all()` call performed by `devtools::test()` in your package DESCRIPTION file (#1636). To disable exports of internal functions and of testthat helpers, use: ``` Config/testthat/load-all: list(export_all = FALSE, helpers = FALSE) ``` Helpers are now attached on the search path by default after calling `devtools::test()`. # testthat 3.1.4 * Minor tweaks to output for latest cli (#1606). # testthat 3.1.3 * Package that explicitly depend on rlang in their description file are now opting into a new snapshot display for errors, warnings, and messages. Previously this only concerned packages that explicitly depended on rlang >= 1.0.0. This display will eventually become the default for all packages. Changes include: - Condition classes are no longer included in the snapshot by default. This is to avoid snapshot noise when upstream code adds or changes a class. For instance, r-devel has added classes to base errors. - Warnings and errors are now printed with rlang, including the `call` field. This makes it easy to monitor the full appearance of warning and error messages as they are displayed to users. This change is part of a push towards mentioning the useful context of an error as part of messages, see the release notes of rlang 1.0.0 for more about this. * Test results show hyperlinks to failed expectation when supported (#1544). 
# testthat 3.1.2 * testthat now uses brio for all reading and writing (#1120). This ensures that snapshots always use "\n" to separate lines (#1516). * `expect_snapshot()` no longer inadvertently trims trailing new lines off of errors and messages (#1509). * If `expect_snapshot()` generates a snapshot with different value but still compares as equal (e.g. because you've set a numeric tolerance), the saved values no longer update if another snapshot in the same file changes. * `expect_snapshot()` now only adds a `.new` file for the variants that actually changed, not all variants, while `expect_snapshot_file()` with variants no longer immediately deletes `.new` files (#1468). * `expect_snapshot_file()` gains a `transform` argument to match `expect_snapshot()` (#1474). `compare` now defaults to `NULL`, automatically guessing the comparison type based on the extension. * `expect_snapshot_file()` now errors if the file being snapshot does not exist; `SnapshotReporter` also now treats the file directory as an absolute path (#1476, @malcolmbarrett) * New `expect_snapshot_warning()` to match `expect_snapshot_error()` (#1532). * `JUnitReporter` now includes skip messages/reasons (@rfineman, #1507) * `local_reproducible_output()` gains a `lang` argument so that you can optionally override the language used to translate error messages (#1483). It also sets the global option `cli.num_colors` in addition to `crayon.enabled`. * `test_that()` no longer inappropriately skips when calling `expect_equal()` when you've temporarily set the locale to non-UTF-8 (#1285). * `skip_if_offline()` now automatically calls `skip_on_cran()` (#1479). * `snapshot_accept()` and `snapshot_review()` now work with exactly the same file specification which can be a snapshot name, a file name, or a directory (#1546). They both work better with variants (#1508). Snapshot cleanup also removes all empty directories (#1457). 
* When a snapshot changes the hint also mentions that you can use `snapshot_review()` (#1500, @DanChaltiel) and the message tells you what variant is active (#1540). * JUnit reporter now includes skip messages/reasons (@rfineman, #1507). # testthat 3.1.1 * Condition expectations like `expect_error()` now match across the ancestry of chained errors (#1493). You can disable this by setting the new `inherit` argument to `FALSE`. * Added preliminary support for rlang 1.0 errors. It is disabled by default for the time being. To activate it, specify `rlang (>= 1.0.0)` in your `DESCRIPTION` file (or `>= 0.99.0.9001` if you're using the dev version). Once activated, snapshots will now use rlang to print error and warning messages, including the `Error:` and `Warning:` prefixes. This means the `call` field of conditions is now displayed in snapshots if present. Parent error messages are also displayed. Following this change, all snapshots including error and warning messages need to be revalidated. We will enable the new rlang 1.0 output unconditionally in a future release. * `expect_snapshot()` gains a new argument `cnd_class` to control whether to show the class of errors, warnings, and messages. The default is currently unchanged so that condition classes keep being included in snapshots. However, we plan to change the default to `FALSE` in an upcoming release to prevent distracting snapshot diffing as upstream packages add error classes. For instance, the development version of R is currently adding classes to basic errors, which causes spurious snapshot changes when testing against R-devel on CI. If you depend on rlang 1.0 (see above), the default is already set to `FALSE`. * `expect_snapshot()` no longer processes rlang injection operators like `!!`. * Fixed bug in expectations with long inputs that use `::` (#1472). # testthat 3.1.0 ## Snapshot tests * `expect_snapshot()` is no longer experimental. 
* `expect_snapshot()` and friends gets an experimental new `variant` argument which causes the snapshot to be saved in `_snaps/{variant}/{test}.md` instead of `_snaps/{test}.md`. This allows you to generate (and compare) unique snapshots for different scenarios like operating system or R version (#1143). * `expect_snapshot()` gains a `transform` argument, which should be a function that takes a character vector of lines and returns a modified character vector of lines. This makes it easy to remove sensitive (e.g. API keys) or stochastic (e.g. random temporary directory names) from snapshot output (#1345). * `expect_snapshot_file()` now replaces previous `.new` snapshot if code fails again with a different value. * `expect_snapshot_value()` now has an explicit `tolerance` argument which uses the testthat default, thus making it more like `expect_equal()` rather than `expect_identical()`. Set it to `NULL` if you want precise comparisons (#1309). `expect_snapshot_value(style = "deparse")` now works with negative values (#1342). * If a test containing multiple snapshots fails (or skips) in between snapshots, the later snapshots are now silently restored. (Previously this warned and reset all snapshots, not just later snapshots). * If you have multiple tests with the same name that use snapshots (not a good idea), you will no longer get a warning. Instead the snapshots will be aggregated across the tests. ## Breaking changes * Condition expectations now consistently return the expected condition instead of the return value (#1371). Previously, they would only return the condition if the return value was `NULL`, leading to inconsistent behaviour. This is a breaking change to the 3rd edition. 
Where you could previously do: ``` expect_equal(expect_warning(f(), "warning"), "value") ``` You must now use condition expectations on the outside: ``` expect_warning(expect_equal(f(), "value"), "warning") # Equivalently, save the value before inspection expect_warning(value <- f(), "warning") expect_equal(value, "value") ``` This breaking change makes testthat more consistent. It also makes it possible to inspect both the value and the warning, which would otherwise require additional tools. ## Minor improvements and bug fixes * Errors in test blocks now display the call if stored in the condition object (#1418). Uncaught errors now show their class (#1426). * Multi-line skips only show the first line in the skip summary. * `expr_label()`, which is used to concisely describe expressions used in expectations, now does a better job of summarising infix functions (#1442). * `local_reproducible_output()` now sets the `max.print` option to 99999 (the default), so your tests are unaffected by any changes you might've made in your `.Rprofile` (#1367). * `ProgressReporter` (the default only) now stops at the end of a file; this ensures that you see the results of all related tests, and ensures that snapshots are handled consistently (#1402). * `ProgressReporter` now uses an env var to adjust the maximum number of failures. This makes it easier to adjust when the tests are run in a subprocess, as is common when using RStudio (#1450). * `skip_on_os()` gains an `arch` argument so you can also choose to skip selected architectures (#1421). * `test_that()` now correctly errors when an expectation fails when run interactively (#1430). * `test_that()` now automatically and correctly generates an "empty test" skip if it only generates warnings or messages (and doesn't contain any expectations). * `testthat_tolerance()` no longer has an unused argument. 
# testthat 3.0.4 * The vendored Catch code used for `use_catch()` now uses a constant value for the stack size rather than relying on SIGSTKSZ. This fixes compatibility for recent glibc versions where SIGSTKSZ is no longer a constant. * Fixed an issue that caused errors and early termination of tests on R <= 3.6 when a failing condition expectation was signalled inside a snapshot. # testthat 3.0.3 * `expect_snapshot_file()` gains a `compare` argument (#1378, @nbenn). This is a customisation point for how to compare old and new snapshot files. The functions `compare_file_binary()` and `compare_file_text()` are now exported from testthat to be supplied as `compare` argument. These implement the same behaviour as the old `binary` argument which is now deprecated. * `expect_snapshot()` no longer deletes snapshots when an unexpected error occurs. * New `announce_snapshot_file()` function for developers of testthat extensions. Announcing a snapshot file allows testthat to preserve files that were not generated because of an unexpected error or a `skip()` (#1393). Unannounced files are automatically deleted during cleanup if the generating code isn't called. * New expectation: `expect_no_match()`. It complements `expect_match()` by checking if a string **doesn't match** a regular expression (@michaelquinn32, #1381). * Support setting the testthat edition via an environment variable (`TESTTHAT_EDITION`) as well (@michaelquinn32, #1386). # testthat 3.0.2 * Failing expectations now include a backtrace when they're not called directly from within `test_that()` but are instead wrapped in some helper function (#1307). * `CheckReporter` now only records warnings when not on CRAN. Otherwise failed CRAN revdep checks tend to be cluttered up with warnings (#1300). It automatically cleans up `testthat-problems.rds` left over from previous runs if the latest run is successful (#1314). 
* `expect_s3_class()` and `expect_s4_class()` can now check that an object _isn't_ an S3 or S4 object by supplying `NA` to the second argument (#1321). * `expect_s3_class()` and `expect_s4_class()` format class names in a less confusing way (#1322). * `expect_snapshot()` collapses multiple adjacent headings of the same type, so that, e.g., if you have multiple lines of code in a row, you'll only see one "Code:" heading (#1311). # testthat 3.0.1 * New `testthat.progress.verbose_skips` option. Set to `FALSE` to stop reporting skips as they occur; they will still appear in the summary (#1209, @krlmlr). * `CheckReporter` results have been tweaked based on experiences from running R CMD check on many packages. Hopefully it should now be easier to see the biggest problems (i.e. failures and errors) while still having skips and warnings available to check if needed (#1274). And now the full test name is always shown, no matter how long (#1268). * Catch C++ tests are no longer reported multiple times (#1237) and are automatically skipped on Solaris since Catch is not supported (#1257). `use_catch()` makes it more clear that your package needs to suggest xml2 (#1235). * `auto_test_package()` works once again (@mbojan, #1211, #1214). * `expect_snapshot()` gains new `error` argument which controls whether or not an error is expected. If an unexpected error is thrown, or an expected error is not thrown, `expect_snapshot()` will fail (even on CRAN) (#1200). * `expect_snapshot_value(style = "deparse")` handles more common R data structures. * `expect_snapshot_value()` now passes `...` on to `waldo::compare()` (#1222). * `expect_snapshot_file()` gives a hint as to next steps when a failure occurs in non-interactive environments (with help from @maelle, #1179). `expect_snapshot_*()` gives a more informative hint when you're running tests interactively (#1226). * `expect_snapshot_*()` automatically removes the `_snaps` directory if it's empty (#1180). 
It also warns if snapshots are discarded because tests have duplicated names (#1278, @krlmlr). * `local_reproducible_output()` now sets the LANGUAGE env var to "en". This matches the behaviour of R CMD check in interactive settings (#1213). It also now unsets RSTUDIO envvar, instead of setting it to 0 (#1225). * `RstudioReporter` has been renamed to `RStudioReporter`. * `skip_if_not()` no longer appends "is not TRUE" to custom messages (@dpprdan, #1247). * `test_that()` now warns (3e only) if code doesn't have braces, since that makes it hard to track the source of an error (#1280, @krlmlr). # testthat 3.0.0 ## 3rd edition testthat 3.0.0 brings with it a 3rd edition that makes a number of breaking changes in order to clean up the interface and help you use our latest recommendations. To opt-in to the 3rd edition for your package, set `Config/testthat/edition: 3` in your `DESCRIPTION` or use `local_edition(3)` in individual tests. You can retrieve the active edition with `edition_get()`. Learn more in `vignette("third-edition")`. * `context()` is deprecated. * `expect_identical()` and `expect_equal()` use `waldo::compare()` to compare actual and expected results. This mostly yields much more informative output when the actual and expected values are different, but while writing it uncovered some bugs in the existing comparison code. * `expect_error()`, `expect_warning()`, `expect_message()`, and `expect_condition()` now all use the same underlying logic: they capture the first condition that matches `class`/`regexp` and allow anything else to bubble up (#998/#1052). They also warn if there are unexpected arguments that are never used. * The `all` argument to `expect_message()` and `expect_warning()` is now deprecated. It was never a particularly good idea or well documented, and is now superseded by the new condition capturing behaviour. * `expect_equivalent()`, `expect_reference()`, `expect_is()` and `expect_that()` are deprecated. 
* Messages are no longer automatically silenced. Either use `suppressMessages()` to hide unimportant messages, or `expect_message()` to catch important messages (#1095). * `setup()` and `teardown()` are deprecated in favour of test fixtures. See `vignette("test-fixtures")` for more details. * `expect_known_output()`, `expect_known_value()`, `expect_known_hash()`, and `expect_equal_to_reference()` are all deprecated in favour of `expect_snapshot_output()` and `expect_snapshot_value()`. * `test_that()` now sets a number of options and env vars to make output as reproducible as possible (#1044). Many of these options were previously set in various places (in `devtools::test()`, `test_dir()`, `test_file()`, or `verify_output()`) but they have now been centralised. You can use in your own code, or when debugging tests interactively with `local_test_context()`. * `with_mock()` and `local_mock()` are deprecated; please use the mockr or mockery packages instead (#1099). ## Snapshot testing New family of snapshot expectations (`expect_snapshot()`, `expect_snapshot_output()`, `expect_snapshot_error()`, and `expect_snapshot_value()`) provide "snapshot" tests, where the expected results are stored in separate files in `test/testthat/_snaps`. They're useful whenever it's painful to store expected results directly in the test files. `expect_snapshot_file()` along with `snapshot_review()` help snapshot more complex data, with initial support for text files, images, and data frames (#1050). See `vignette("snapshotting")` for more details. ## Reporters * `CheckReporter` (used inside R CMD check) now prints out all problems (i.e. errors, failures, warnings and skips; and not just the first 10), lists skips types, and records problems in machine readable format in `tests/testthat-problems.rds` (#1075). * New `CompactProgressReporter` tweaks the output of `ProgressReporter` for use with a single file, as in `devtools::test_file()`. 
You can pick a different default by setting `testthat.default_compact_reporter` to the name of a reporter. * `ProgressReporter` (the default reporter) now keeps the stack traces of errors that happen before the first test, making problems substantially easier to track down (#1004). It checks if you've exceeded the maximum number of failures (from option `testthat.progress.max_fails`) after each expectation, rather than at the end of each file (#967). It also gains new random praise options that use emoji, and lists skipped tests by type (#1028). * `StopReporter` adds random praise emoji when a single test passes (#1094). It has more refined display of failures, now using the same style as `CompactProgressReporter` and `ProgressReporter`. * `SummaryReporter` now records file start, not just context start. This makes it more compatible with modern style which does not use `context()` (#1089). * All reporters now use exactly the same format when reporting the location of an expectation. * Warnings now include a backtrace, making it easier to figure out where they came from. * Catch C++ tests now provide detailed results for each test. To upgrade existing code, re-run `testthat::use_catch()` (#1008). * Many reporters (e.g. the check reporter) no longer raise an error when any tests fail. Use the `stop_on_failure` argument to `devtools::test()` and `testthat::test_dir()` if your code relies on this. Alternatively, use `reporter = c("check", "fail")` to e.g. create a failing check reporter. ## Fixtures * New `vignette("test-fixtures")` describes test fixtures; i.e. how to temporarily and cleanly change global state in order to test parts of your code that otherwise would be hard to run (#1042). `setup()` and `teardown()` are superseded in favour of test fixtures. * New `teardown_env()` for use with `withr::defer()`. This allows you to run code after all other tests have been run. 
## Skips * New `vignette("skipping")` gives more general information on skipping tests, include some basics on testing skipping helpers (#1060). * `ProgressReporter()` and `CheckReporter()` list the number of skipped tests by reason at the end of the reporter. This makes it easier to check that you're not skipping the wrong tests, particularly on CI services (#1028). ## Test running * `test_that()` no longer triggers an error when run outside of tests; instead it produces a more informative summary of all failures, errors, warnings, and skips that occurred inside the test. * `test_that()` now errors if `desc` is not a string (#1161). * `test_file()` now runs helper, setup, and teardown code, and has the same arguments as `test_dir()` (#968). Long deprecated `encoding` argument has been removed. * `test_dir()` now defaults `stop_on_failure` to `TRUE` for consistency with other `test_` functions. The `wrap` argument has been deprecated; it's not clear that it should ever have been exposed. * New `test_local()` tests a local source package directory. It's equivalent to `devtools::test()` but doesn't require devtools and all its dependencies to be installed (#1030). ## Minor improvements and bug fixes * testthat no longer supports tests stored in `inst/tests`. This has been deprecated since testthat 0.11.0 (released in 2015). `test_package()` (previously used for running tests in R CMD check) will fail silently if no tests are found to avoid breaking old packages on CRAN (#1149). * `capture_output()` and `verify_output()` use a new `testthat_print()` generic. This allows you to control the printed representation of your object specifically for tests (i.e. if your usual print method shows data that varies in a way that you don't care about for tests) (#1056). * `context_start_file()` is now exported for external reporters (#983, #1082). It now only strips first instance of prefix/suffix (#1041, @stufield). * `expect_error()` no longer encourages you to use `class`. 
This advice removes one type of fragility at the expense of creating a different type (#1013). * `expect_known_failure()` has been removed. As far as I can tell it was only ever used by testthat, and is rather fragile. * `expect_true()`, `expect_false()`, and `expect_null()` now use waldo to produce more informative failures. * `verify_output()` no longer always fails if output contains a carriage return character ("\r") (#1048). It uses the `pdf()` device instead of `png()` so it works on systems without X11 (#1011). And it uses `waldo::compare()` to give more informative failures. # testthat 2.3.2 * Fix R CMD check issues # testthat 2.3.1 * The last version of testthat introduced a performance regression in error assertions (#963). To fix it, you need to install rlang 0.4.2. * Fixed error assertions with rJava errors (#964). * Fixed issue where error and warning messages were not retrieved with `conditionMessage()` under certain circumstances. # testthat 2.3.0 ## Conditions This release mostly focusses on an overhaul of how testthat works with conditions (i.e. errors, warnings and messages). There are relatively few user-facing changes, although you should now see more informative backtraces from errors and failures. * Unexpected errors are now printed with a simplified backtrace. * `expect_error()` and `expect_condition()` now display a backtrace when the error doesn't conform to expectations (#729). * `expect_error()`, `expect_warning()` and `expect_message()` now call `conditionMessage()` to get the condition message. This generic makes it possible to generate messages at print-time rather than signal-time. * `expect_error()` gets a better warning message when you test for a custom error class with `regexp`. * New `exp_signal()` function is a condition signaller that implements the testthat protocol (signal with `stop()` if the expectation is broken, with a `muffle_expectation` restart). * Existence of restarts is first checked before invocation. 
This makes it possible to signal warnings or messages with a different condition signaller (#874). * `ListReporter` now tracks expectations and errors, even when they occur outside of tests. This ensures that `stop_on_failure` matches the results displayed by the reporter (#936). * You can silence warnings about untested error classes by implementing a method for `is_uninformative_warning()`. This method should be lazily registered, e.g. with `vctrs::s3_register()`. This is useful for introducing an experimental error class without encouraging users to depend on the class in their tests. * Respect options(warn = -1) to ignore all warnings (@jeroen #958). ## Expectations * Expectations can now be explicitly subclassed with `new_expectation()`. This constructor follows our new conventions for S3 classes and takes an optional subclass and optional attributes. * Unquoted inputs no longer potentially generate multiple test messages (#929). * `verify_output()` no longer uses quasiquotation, which fixes issues when verifying the output of tidy eval functions (#945). * `verify_output()` gains a `unicode` parameter to turn on or off the use of Unicode characters by the cli package. It is disabled by default to prevent the tests from failing on platforms like Windows that don't support UTF-8 (which could be your contributors' or your CI machines). * `verify_output()` now correctly handles multi-line condition messages. * `verify_output()` now adds spacing after condition messages, consistent with the spacing added after normal output. * `verify_output()` has a new syntax for inserting headers in output files: insert a `"# Header"` string (starting with `#` as in Markdown) to add a header to a set of outputs. ## Other minor improvements and bug fixes * `compare.numeric()` uses a more sophisticated default tolerance that will automatically skip tests that rely on numeric tolerance if long doubles are not available (#940). 
* `JunitReporter` now reports tests in ISO 8601 in the UTC timezone and uses the maximum precision of 3 decimal places (#923). # testthat 2.2.1 * Repair regression in `test_rd()` and add a couple of tests to hopefully detect the problem earlier in the future. # testthat 2.2.0 ## New features * New `verify_output()` is designed for testing output aimed at humans (most commonly print methods and error messages). It is a regression test that saves output in a way that makes it easy to review. It is automatically skipped on CRAN (#782, #834). ## Minor improvements and bug fixes * `as.data.frame.testthat_results()` now always returns a data frame with 13 columns (@jozefhajnala, #887). * `auto_test_package()` now correctly handles helper files (`tests/testthat/helper-*.R`), automatically reloading all code and rerunning all tests (@CorradoLanera, #376, #896). * `expect_match()` now displays `info` even when match length is 0 (#867). * `expect_s3_class()` gains new `exact` argument that allows you to check for an exact class match, not just inheritance (#885). * `fail()` and `succeed()` gain `info` argument, which is passed along to `expect()`. * `test_examples()` gets some minor fixes: it now returns the results invisibly, doesn't assume that examples should contain tests, and documents that you shouldn't be using it routinely (#841). * `test_file()` only calls `Reporter$end_context()` if a context was started, fixing an error in `TeamcityReporter` (@atheriel, #883). * `skip()` now reports reason for skipping as: `Reason: {skip condition}` (@patr1ckm, #868). * `skip_if()` and `skip_if_not()` now report `Reason: {skip condition} is TRUE` and `Reason: {skip condition} is not TRUE` respectively (@ patr1ckm, #868). * `skip_if_translated()` now tests for translation of a specific message. 
This is more robust than the previous approach because translation happens message-by-message, not necessarily for the entire session (#879) (and in general, it's impossible to determine what language R is currently using). * `skip_on_covr()` allows you to skip tests when covr is running. (@ianmcook, #895) * `expect_known_value()` gains a new serialisation `version` argument, defaulting to 2. Prevents the `.rds` files created to hold reference objects from making a package appear to require R >= 3.5 (#888 @jennybc). # testthat 2.1.1 * Fix test failures in strict latin1 locale # testthat 2.1.0 ## New expectations * New `expect_visible()` and `expect_invisible()` make it easier to check if a function call returns its result visibly or invisibly (#719). * New `expect_mapequal(x, y)` checks that `x` and `y` have the same names, and the same value associated with each name (i.e. they compare the values of the vector standardising the order of the names) (#863). * New `expect_vector()` is a wrapper around `vctrs::vec_assert()` making it easy to test against the vctrs definitions of prototype and size (#846). (Currently requires development version of vctrs.) ## Improvements to existing expectations * All expectations give clearer error messages if you forget the `object` or `expected` arguments (#743). * `expect_equal()` now correctly compares infinite values (#789). * In `expect_equal_to_reference()`, the default value for `update` is now `FALSE` (@BrodieG, #683). * `expect_error()` now returns the error object as documented (#724). It also now warns if you're using a classed expectation and you're not using the `class` argument. This is good practice as it decouples the error object (which tends to be stable) from its rendering to the user (which tends to be fragile) (#816). * `expect_identical()` gains a `...` argument to pass additional arguments down to `identical()` (#714). 
* `expect_lt()`, `expect_lte()`, `expect_gt()` `expect_gte()` now handle `Inf` and `NA` arguments appropriately (#732), and no longer require the inputs to be numeric. * `expect_output()` gains a `width` argument, allowing you to control the output width. This does not inherit from `getOption("width")`, ensuring that tests return the same results regardless of environment (#805). * `expect_setequal()` now works with more vector types (including lists), because it uses `%in%`, rather than `sort()`. It also warns if the inputs are named, as this suggests that your mental model of how `expect_setequal()` works is wrong (#750). * `is_true()` and `is_false()` have been deprecated because they conflict with other functions in the tidyverse. ## Reporters * Reporter documentation has been considerably improved (#657). * `CheckReporter`, used by R CMD check, now includes a count of warnings. * `JUnitReporter` no longer replaces `.` in class names (#753), and creates output that should be more compatible with Jenkins (#806, @comicfans). * `ListReporter` now records number of passed tests and original results in new columns (#675). * `ProgressReporter`, the default reporter, now: * Automatically generates a context from the file name. We no longer recommend the use of `context()` and instead encourage you to delete it, allowing the context to be autogenerated from the file name. This also eliminates the error that occurred if tests can before the first `context()` (#700, #705). * Gains a `update_interval` parameter to control how often updates are printed (default 0.1 s). This prevents large printing overhead for very fast tests. (#701, @jimhester) * Uses a 3 character wide column to display test successes, so up to 999 successful tests can be displayed without changing the alignment (#712). * `reporter$end_reporter()` is now only called when testing completes successfully. This ensures that you don't get unnecessary output when the test fails partway through (#727). 
## Skips * `skip_if_offline()` skips tests if an internet connection is not available (#685). * `skip_on_ci()` skips tests on continuous integration systems (@mbjoseph, #825) by looking for a `CI` env var.. ## Other new features * New `testthat_examples()` and `testthat_example()` make it easy to access new test files bundled with the package. These are used in various examples to make it easier to understand how to use the package. * New `local_mock()` which allows you to mock a function without having to add an additional layer of indentation as with `with_mock()` (#856). ## Other minor improvements and bug fixes * `auto_test_package()` works better with recent devtools and also watches `src/` for changes (#809). * `expect_s3_class()` now works with unquoting (@jalsalam, #771). * `expectation` objects now contain the failure message, even when successful (#836) * `devtools::test()` no longer fails if run multiple times within the same R session for a package containing Catch tests. ([devtools #1832](https://github.com/r-lib/devtools/issues/1832)) * New `testing_package()` retrieves the name of the package currently being tested (#699). * `run_testthat_tests` C entrypoint is registered more robustly. * `skip()` now always produces a `message` of length 1, as expected elsewhere in testthat (#791). * Warnings are passed through even when `options(warn = 2)` is set (@yutannihilation, #721). # testthat 2.0.1 * Fix failing tests with devtools 2.0.0 # testthat 2.0.0 ## Breaking API changes * "Can't mock functions in base packages": You can no longer use `with_mock()` to mock functions in base packages, because this no longer works in R-devel due to changes with the byte code compiler. I recommend using [mockery](https://github.com/r-lib/mockery) or [mockr](https://github.com/krlmlr/mockr) instead. * The order of arguments to `expect_equivalent()` and `expect_error()` has changed slightly as both now pass `...` on another function. 
This reveals itself with a number of different errors, like: * 'what' must be a character vector * 'check.attributes' must be logical * 'tolerance' should be numeric * argument is not interpretable as logical * threw an error with unexpected class * argument "quo" is missing, with no default * argument is missing, with no default If you see one of these errors, check the number, order, and names of arguments to the expectation. * "Failure: (unknown)". The last release mistakenly failed to test bare expectations not wrapped inside `test_that()`. If you see "(unknown)" in a failure message, this is a failing expectation that you previously weren't seeing. As well as fixing the failure, please also wrap inside a `test_that()` with an informative name. * "Error: the argument has already been evaluated": the way in which expectations now need create labels has changed, which caused a couple of failures with unusual usage when combined with `Reduce`, `lapply()`, and `Map()`. Avoid these functions in favour of for loops. I also recommend reading the section below on quasiquotation support in order to create more informative failure messages. ## Expectations ### New and improved expectations * `expect_condition()` works like `expect_error()` but captures any condition, not just error conditions (#621). * `expect_error()` gains a `class` argument that allows you to make an assertion about the class of the error object (#530). * `expect_reference()` checks if two names point to the same object (#622). * `expect_setequal()` compares two sets (stored in vectors), ignoring duplicates and differences in order (#528). ### New and improved skips * `skip_if()` makes it easy to skip a test when a condition is true (#571). For example, use `skip_if(getRversion() <= 3.1)` to skip a test in older R versions. * `skip_if_translated()` skips tests if you're running in an locale where translations are likely to occur (#565). 
Use this to avoid spurious failures when checking the text of error messages in non-English locales. * `skip_if_not_installed()` gains new `minimum_version` argument (#487, #499). ### Known good values We have identified a useful family of expectations that compares the results of an expression to a known good value stored in a file. They are designed to be use in conjunction with git so that you can see what precisely has changed, and revert it if needed. * `expect_known_output()` replaces `expect_output_file()`, which has been soft-deprecated. It now defaults to `update = TRUE` and warn, rather than failing on the first run. It gains a `print` argument to automatically print the input (#627). It also sets the width option to 80 to ensure consistent output across environments (#514) * `expect_known_value()` replaces `expect_equal_to_reference()`, which has been soft-deprecated. It gains an update argument defaulting to `TRUE`. This changes behaviour from the previous version, and soft-deprecated `expect_equal_to_reference()` gets `update = FALSE`. * `expect_known_failure()` stored and compares the failure message from an expectation. It's a useful regression test when developing informative failure messages for your own expectations. ### Quasiquotation support All expectations can now use unquoting (#626). This makes it much easier to generate informative failure messages when running tests in a for loop. For example take this test: ```R f <- function(i) if (i > 3) i * 9 else i * 10 for (i in 1:5) { expect_equal(f(i), i * 10) } ``` When it fails, you'll see the message ``Error: `f(i)` not equal to `i * 10` ``. That's hard to diagnose because you don't know which iteration caused the problem! ```R for (i in 1:5) { expect_equal(f(!!i), !!(i * 10)) } ``` If you unquote the values using `!!`, you get the failure message `` `f(4L)` not equal to 40.``. This is much easier to diagnose! See `?quasi_label()` for more details. 
(Note that this is not tidy evaluation per se, but is closely related. At this time you can not unquote quosures.) ## New features ### Setup and teardown * New `setup()` and `teardown()` functions allow you to run at the start and end of each test file. This is useful if you want to pair cleanup code with the code that messes up state (#536). * Two new prefixes are recognised in the `test/` directory. Files starting with `setup` are run before tests (but unlike `helpers` are not run in `devtools::load_all()`). Files starting with `teardown` are run after all tests are completed (#589). ### Other new features * All files are now read and written as UTF-8 (#510, #605). * `is_testing()` allows you to tell if your code is being run inside a testing environment (#631). Rather than taking a run-time dependency on testthat you may want to inline the function into your own package: ```R is_testing <- function() { identical(Sys.getenv("TESTTHAT"), "true") } ``` It's frequently useful to combine with `interactive()`. ### New default reporter A new default reporter, `ReporterProgress`, produces more aesthetically pleasing output and makes the most important information available upfront (#529). You can return to the previous default by setting `options(testthat.default_reporter = "summary")`. ### Reporters * Output colours have been tweaked to be consistent with clang: warnings are now in magenta, and skips in blue. * New `default_reporter()` and `check_reporter()` which returns the default reporters for interactive and check environments (#504). * New `DebugReporter` that calls a better version of `recover()` in case of failures, errors, or warnings (#360, #470). * New `JunitReporter` generates reports in JUnit compatible format. (#481, @lbartnik; #640, @nealrichardson; #575) * New `LocationReporter` which just prints the location of every expectation. This is useful for locating segfaults and C/C++ breakpoints (#551). 
* `SummaryReporter` received a number of smaller tweaks * Aborts testing as soon the limit given by the option `testthat.summary.max_reports` (default 10) is reached (#520). * New option `testthat.summary.omit_dots = TRUE` hides the progress dots speeding up tests by a small amount (#502). * Bring back random praise and encouragement which I accidentally dropped (#478). * New option `testthat.default_check_reporter`, defaults to `"check"`. Continuous Integration system can set this option before evaluating package test sources in order to direct test result details to known location. * All reporters now accept a `file` argument on initialization. If provided, reporters will write the test results to that path. This output destination can also be controlled with the option `testthat.output_file` (#635, @nealrichardson). ## Deprecated functions * `is_null()` and `matches()` have been deprecated because they conflict with other functions in the tidyverse (#523). ## Minor improvements and bug fixes * Updated Catch to 1.9.6. `testthat` now understands and makes use of the package routine registration mechanism required by CRAN with R >= 3.4.0. (@kevinushey) * Better reporting for deeply nested failures, limiting the stack trace to the first and last 10 entries (#474). * Bare expectations notify the reporter once again. This is achieved by running all tests inside `test_code()` by default (#427, #498). This behaviour can be overridden by setting `wrap = FALSE` in `test_dir()` and friends (#586). * `auto_test()` and `auto_test_package()` provide `hash` parameter to enable switching to faster, time-stamp-based modification detection (#598, @katrinleinweber). `auto_test_package()` works correctly on windows (#465). * `capture_output_lines()` is now exported (#504). * `compare.character()` works correctly for vectors of length > 5 (#513, @brodieG) * `compare.default()` gains a `max_diffs` argument and defaults to printing out only the first 9 differences (#538). 
* `compare.numeric()` respects `check.attributes()` so `expect_equivalent()` correctly ignores attributes of numeric vectors (#485). * Output expectations (`expect_output()`, `expect_message()`, `expect_warning()`, and `expect_silent()`) all invisibly return the first argument to be consistent with the other expectations (#615). * `expect_length()` works with any object that has a `length` method, not just vectors (#564, @nealrichardson) * `expect_match()` now accepts explicit `perl` and `fixed` arguments, and adapts the failure message to the value of `fixed`. This also affects other expectations that forward to `expect_match()`, like `expect_output()`, `expect_message()`, `expect_warning()`, and `expect_error()`. * `expect_match()` escapes special regular expression characters when printing (#522, @jimhester). * `expect_message()`, `expect_warning()` and `expect_error()` produce clearer failure messages. * `find_test_scripts()` only looks for `\.[rR]` in the extension (#492, @brodieG) * `test_dir()`, `test_package()`, `test_check()` unset the `R_TESTS` env var (#603) * `test_examples()` now works with installed packages as well as source packages (@jimhester, #532). * `test_dir()`, `test_package()`, and `test_check()` gain `stop_on_failure` and `stop_on_waring` arguments that control whether or not an error is signalled if any tests fail or generate warnings (#609, #619). * `test_file()` now triggers a `gc()` after tests are run. This helps to ensure that finalisers are run earlier (#535). * `test_path()` now generates correct path when called from within `tools::testInstalledPackage()` (#542). * `test_path()` no longer assumes that the path exists (#448). * `test_that()` calls without any expectations generate a default `skip()` (#413). * `test_dir()` gains `load_helpers` argument (#505). * `show_failures()` simply prints a failure if it occurs. This makes it easier to show failures in examples. 
* `with_mock()` disallows mocking of functions in base packages, because this doesn't work with the current development version of R (#553). # testthat 1.0.2 * Ensure `std::logic_error()` constructed with `std::string()` argument, to avoid build errors on Solaris. # testthat 1.0.1 * New `expect_output_file()` to compare output of a function with a text file, and optionally update it (#443, @krlmlr). * Properly scoped use + compilation of C++ unit testing code using Catch to `gcc` and `clang` only, as Catch includes code that does not strictly conform to the C++98 standard. (@kevinushey) * Fixed an out-of-bounds memory access when routing Catch output through `Rprintf()`. (@kevinushey) * Ensure that unit tests run on R-oldrel (remove use of `dir.exists()`). (@kevinushey) * Improved overriding of calls to `exit()` within Catch, to ensure compatibility with GCC 6.0. (@krlmlr) * Hardened formatting of difference messages, previously the presence of `%` characters could affect the output (#446, @krlmlr). * Fixed errors in `expect_equal()` when comparing numeric vectors with and without attributes (#453, @krlmlr). * `auto_test()` and `auto_test_package()` show only the results of the current test run and not of previously failed runs (#456, @krlmlr). # testthat 1.0.0 ## Breaking changes The `expectation()` function now expects an expectation type (one of "success", "failure", "error", "skip", "warning") as first argument. If you're creating your own expectations, you'll need to use `expect()` instead (#437). ## New expectations The expectation system got a thorough overhaul (#217). This primarily makes it easier to add new expectations in the future, but also included a thorough review of the documentation, ensuring that related expectations are documented together, and have evocative names. One useful change is that most expectations invisibly return the input `object`. 
This makes it possible to chain together expectations with magrittr: ```R factor("a") %>% expect_type("integer") %>% expect_s3_class("factor") %>% expect_length(1) ``` (And to make this style even easier, testthat now re-exports the pipe, #412). The exception to this rule are the expectations that evaluate (i.e. for messages, warnings, errors, output etc), which invisibly return `NULL`. These functions are now more consistent: using `NA` will cause a failure if there is a errors/warnings/messages/output (i.e. they're not missing), and will `NULL` fail if there aren't any errors/warnings/messages/output. This previously didn't work for `expect_output()` (#323), and the error messages were confusing with `expect_error(..., NA)` (#342, @nealrichardson + @krlmlr, #317). Another change is that `expect_output()` now requires you to explicitly print the output if you want to test a print method: `expect_output("a", "a")` will fail, `expect_output(print("a"), "a")` will succeed. There are six new expectations: * `expect_type()` checks the _type_ of the object (#316), `expect_s3_class()` tests that an object is S3 with given class, `expect_s4_class()` tests that an object is S4 with given class (#373). I recommend using these more specific expectations instead of the more general `expect_is()`. * `expect_length()` checks that an object has expected length. * `expect_success()` and `expect_failure()` are new expectations designed specifically for testing other expectations (#368). A number of older features have been deprecated: * `expect_more_than()` and `expect_less_than()` have been deprecated. Please use `expect_gt()` and `expect_lt()` instead. * `takes_less_than()` has been deprecated. * `not()` has been deprecated. Please use the explicit individual forms `expect_error(..., NA)` , `expect_warning(.., NA)` and so on. ## Expectations are conditions Now all expectations are also conditions, and R's condition system is used to signal failures and successes (#360, @krlmlr). 
All known conditions (currently, "error", "warning", "message", "failure", and "success") are converted to expectations using the new `as.expectation()`. This allows third-party test packages (such as `assertthat`, `testit`, `ensurer`, `checkmate`, `assertive`) to seamlessly establish `testthat` compatibility by issuing custom error conditions (e.g., `structure(list(message = "Error message"), class = c("customError", "error", "condition"))`) and then implementing `as.expectation.customError()`. The `assertthat` package contains an example. ## Reporters The reporters system class has been considerably refactored to make existing reporters simpler and to make it easier to write new reporters. There are two main changes: * Reporters classes are now R6 classes instead of Reference Classes. * Each callbacks receive the full context: * `add_results()` is passed context and test as well as the expectation. * `test_start()` and `test_end()` both get the context and test. * `context_start()` and `context_end()` get the context. * Warnings are now captured and reported in most reporters. * The reporter output goes to the original standard output and is not affected by `sink()` and `expect_output()` (#420, @krlmlr). * The default summary reporter lists all warnings (#310), and all skipped tests (@krlmlr, #343). New option `testthat.summary.max_reports` limits the number of reports printed by the summary reporter. The default is 15 (@krlmlr, #354). * `MinimalReporter` correct labels errors with E and failures with F (#311). * New `FailReporter` to stop in case of failures or errors after all tests (#308, @krlmlr). ## Other * New functions `capture_output()`, `capture_message()`, and `capture_warnings()` selectively capture function output. These are used in `expect_output()`, `expect_message()` and `expect_warning()` to allow other types out output to percolate up (#410). * `try_again()` allows you to retry code multiple times until it succeeds (#240). 
* `test_file()`, `test_check()`, and `test_package()` now attach testthat so all testing functions are available. * `source_test_helpers()` gets a useful default path: the testthat tests directory. It defaults to the `test_env()` to be consistent with the other source functions (#415). * `test_file()` now loads helpers in the test directory before running the tests (#350). * `test_path()` makes it possible to create paths to files in `tests/testthat` that work interactively and when called from tests (#345). * Add `skip_if_not()` helper. * Add `skip_on_bioc()` helper (@thomasp85). * `make_expectation()` uses `expect_equal()`. * `setup_test_dir()` has been removed. If you used it previously, instead use `source_test_helpers()` and `find_test_scripts()`. * `source_file()` exports the function testthat uses to load files from disk. * `test_that()` returns a `logical` that indicates if all tests were successful (#360, @krlmlr). * `find_reporter()` (and also all high-level testing functions) support a vector of reporters. For more than one reporter, a `MultiReporter` is created (#307, @krlmlr). * `with_reporter()` is used internally and gains new argument `start_end_reporter = TRUE` (@krlmlr, 355). * `set_reporter()` returns old reporter invisibly (#358, @krlmlr). * Comparing integers to non-numbers doesn't raise errors anymore, and falls back to string comparison if objects have different lengths. Complex numbers are compared using the same routine (#309, @krlmlr). * `compare.numeric()` and `compare.character()` received another overhaul. This should improve behaviour of edge cases, and provides a strong foundation for further work. Added `compare.POSIXt()` for better reporting of datetime differences. * `expect_identical()` and `is_identical_to()` now use `compare()` for more detailed output of differences (#319, @krlmlr). * Added [Catch](https://github.com/catchorg/Catch2) v1.2.1 for unit testing of C++ code. See `?use_catch()` for more details. 
(@kevinushey) # testthat 0.11.0 * Handle skipped tests in the TAP reporter (#262). * New `expect_silent()` ensures that code produces no output, messages, or warnings (#261). * New `expect_lt()`, `expect_lte()`, `expect_gt()` and `expect_gte()` for comparison with or without equality (#305, @krlmlr). * `expect_output()`, `expect_message()`, `expect_warning()`, and `expect_error()` now accept `NA` as the second argument to indicate that output, messages, warnings, and errors should be absent (#219). * Praise gets more diverse thanks to the praise package, and you'll now get random encouragement if your tests don't pass. * testthat no longer muffles warning messages. If you don't want to see them in your output, you need to explicitly quiet them, or use an expectation that captures them (e.g. `expect_warning()`). (#254) * Use tests in `inst/tests` is formally deprecated. Please move them into `tests/testthat` instead (#231). * `expect_match()` now encodes the match, as well as the output, in the expectation message (#232). * `expect_is()` gives better failure message when testing multiple inheritance, e.g. `expect_is(1:10, c("glm", "lm"))` (#293). * Corrected argument order in `compare.numeric()` (#294). * `comparison()` constructure now checks its arguments are the correct type and length. This bugs a bug where tests failed with an error like "values must be length 1, but FUN(X[[1]]) result is length 2" (#279). * Added `skip_on_os()`, to skip tests on specified operating systems (@kevinushey). * Skip test that depends on `devtools` if it is not installed (#247, @krlmlr) * Added `skip_on_appveyor()` to skip tests on Appveyor (@lmullen). * `compare()` shows detailed output of differences for character vectors of different length (#274, @krlmlr). * Detailed output from `expect_equal()` doesn't confuse expected and actual values anymore (#274, @krlmlr). # testthat 0.10.0 * Failure locations are now formatted as R error locations. 
Add an 'invert' argument to `find_test_scripts()`.
* All equality expectations are now documented together (#173); all matching expectations are also documented together. # testthat 0.9.1 * Bump R version dependency # testthat 0.9 ## New features * BDD: testhat now comes with an initial behaviour driven development (BDD) interface. The language is similar to RSpec for Ruby or Mocha for JavaScript. BDD tests read like sentences, so they should make it easier to understand the specification of a function. See `?describe()` for further information and examples. * It's now possible to `skip()` a test with an informative message - this is useful when tests are only available under certain conditions, as when not on CRAN, or when an internet connection is available (#141). * `skip_on_cran()` allows you to skip tests when run on CRAN. To take advantage of this code, you'll need either to use devtools, or run `Sys.setenv(NOT_CRAN = "true"))` * Simple mocking: `with_mock()` makes it easy to temporarily replace functions defined in packages. This is useful for testing code that relies on functions that are slow, have unintended side effects or access resources that may not be available when testing (#159, @krlmlr). * A new expectation, `expect_equal_to_reference()` has been added. It tests for equality to a reference value stored in a file (#148, @jonclayden). ## Minor improvements and bug fixes * `auto_test_package()` works once more, and now uses `devtools::load_all()` for higher fidelity loading (#138, #151). * Bug in `compare.character()` fixed, as reported by Georgi Boshnakov. * `colourise()` now uses option `testthat.use_colours` (default: `TRUE`). If it is `FALSE`, output is not colourised (#153, @mbojan). * `is_identical_to()` only calls `all.equal()` to generate an informative error message if the two objects are not identical (#165). * `safe_digest()` uses a better strategy, and returns NA for directories (#138, #146). * Random praise is re-enabled by default (again!) (#164). 
* Teamcity reporter now correctly escapes output messages (#150, @windelinckx). It also uses nested suites to include test names. ## Deprecated functions * `library_if_available()` has been deprecated. # testthat 0.8.1 * Better default environment for `test_check()` and `test_package()` which allows S4 class creation in tests * `compare.character()` no longer fails when one value is missing. # testthat 0.8 testthat 0.8 comes with a new recommended structure for storing your tests. To better meet CRAN recommended practices, testthat now recommend that you to put your tests in `tests/testthat`, instead of `inst/tests` (this makes it possible for users to choose whether or not to install tests). With this new structure, you'll need to use `test_check()` instead of `test_packages()` in the test file (usually `tests/testthat.R`) that runs all testthat unit tests. The other big improvement to usability comes from @kforner, who contributed code to allow the default results (i.e. those produced by `SummaryReporter`) to include source references so you can see exactly where failures occurred. ## New reporters * `MultiReporter`, which combines several reporters into one. (Thanks to @kforner) * `ListReporter`, which captures all test results with their file, context, test and elapsed time. `test_dir`, `test_file`, `test_package` and `test_check` now use the `ListReporter` to invisibly return a summary of the tests as a data frame. (Thanks to @kforner) * `TeamCityReporter` to produce output compatible with the TeamCity continuous integration environment. (Thanks to @windelinckx) * `SilentReporter` so that `testthat` can test calls to `test_that`. (Thanks to @craigcitro, #83) ## New expectations * `expect_null()` and `is_null` to check if an object is NULL (#78) * `expect_named()` and `has_names()` to check the names of a vector (#79) * `expect_more_than()`, `is_more_than()`, `expect_less_than()`, `is_less_than()` to check values above or below a threshold. 
(#77, thanks to @jknowles) ## Minor improvements and bug fixes * `expect_that()` (and thus all `expect_*` functions) now invisibly return the expectation result, and stops if info or label arguments have length > 1 (thanks to @kforner) * fixed two bugs with source_dir(): it did not look for the source scripts at the right place, and it did not use its `chdir` argument. * When using `expect_equal()` to compare strings, the default output for failure provides a lot more information, which should hopefully help make finding string mismatches easier. * `SummaryReporter` has a `max_reports` option to limit the number of detailed failure reports to show. (Thanks to @crowding) * Tracebacks will now also contain information about where the functions came from (where that information is available). * `matches` and `expect_match` now pass additional arguments on to `grepl` so that you can use `fixed = TRUE`, `perl = TRUE` or `ignore.case = TRUE` to control details of the match. `expect_match` now correctly fails to match NULL. (#100) * `expect_output`, `expect_message`, `expect_warning` and `expect_error` also pass ... on to `grepl`, so that you can use `fixed = TRUE`, `perl = TRUE` or `ignore.case = TRUE` * Removed `stringr` and `evaluate` dependencies. * The `not()` function makes it possible to negate tests. For example, `expect_that(f(), not(throws_error()))` asserts that `f()` does not throw an error. * Make `dir_state` less race-y. (Thanks to @craigcitro, #80) * `auto_test` now pays attention to its 'reporter' argument (Thanks to @crowding, #81) * `get_reporter()`, `set_reporter()` and `with_reporter()` are now exported (#102) # testthat 0.7.1 * Ignore attributes in `is_true` and `is_false` (#49) * `make_expectation` works for more types of input (#52) * Now works better with evaluate 0.4.3. * new `fail()` function always forces a failure in a test. Suggested by Richie Cotton (#47) * Added `TapReporter` to produce output compatible with the "test anything protocol". 
Contributed by Dan Keshet. * Fixed where `auto_test` would identify the wrong files as having changed. (Thanks to Peter Meilstrup) # testthat 0.7 * `SummaryReporter`: still return informative messages even if no tests defined (just bare expectations). (Fixes #31) * Improvements to reference classes (Thanks to John Chambers) * Bug fixes for when nothing was generated in `gives_warning` / `shows_message`. (Thanks to Bernd Bischl) * New `make_expectation` function to programmatically generate an equality expectation. (Fixes #24) * `SummaryReporter`: You don't get praise until you have some tests. * Depend on `methods` rather than requiring it so that testthat works when run from `Rscript` * `auto_test` now normalises paths to enable better identification of file changes, and fixes bug in instantiating new reporter object. # testthat 0.6 * All `mutatr` classes have been replaced with ReferenceClasses. * Better documentation for short-hand expectations. * `test_dir` and `test_package` gain new `filter` argument which allows you to restrict which tests are run. # testthat 0.5 * bare expectations now correctly throw errors again # testthat 0.4 * autotest correctly loads code and executes tests in same environment * contexts are never closed before they are opened, and always closed at the end of file * fixed small bug in `test_dir` where each test was not given its own environment * all `expect_*` short cut functions gain a label argument, thanks to Steve Lianoglou # testthat 0.3 * all expectations now have a shortcut form, so instead of expect_that(a, is_identical_to(b)) you can do expect_identical(a, b) * new shows_message and gives_warning expectations to test warnings and messages * expect_that, equals, is_identical_to and is_equivalent to now have additional label argument which allows you to control the appearance of the text used for the expected object (for expect_that) and actual object (for all other functions) in failure messages. 
This is useful when you have loops that run tests as otherwise all the variable names are identical, and it's difficult to tell which iteration caused the failure. * executing bare tests gives nicer output * all expectations now give more information on failure to make it easier to track down the problem. * test_file and test_dir now run in code in separate environment to avoid pollution of global environment. They also temporary change the working directory so tests can use relative paths. * test_package makes it easier to run all tests in an installed package. Code run in this manner has access to non-exported functions and objects. If any errors or failures occur, test_package will throw an error, making it suitable for use with R CMD check. # testthat 0.2 * colourise also works in screen terminal * equals expectation provides more information about failure * expect_that has extra info argument to allow you to pass in any extra information you'd like included in the message - this is very helpful if you're using a loop to run tests * is_equivalent_to: new expectation that tests for equality ignoring attributes * library_if_available now works! 
(thanks to report and fix from Felix Andrews) * specify larger width and join pieces back together whenever deparse used (thanks to report and fix from Felix Andrews) * test_dir now looks for any files starting with test (not test- as before) testthat/inst/0000755000176200001440000000000015130237654013075 5ustar liggesuserstestthat/inst/include/0000755000176200001440000000000013167703257014525 5ustar liggesuserstestthat/inst/include/testthat.h0000644000176200001440000000003713167703257016536 0ustar liggesusers#include testthat/inst/include/testthat/0000755000176200001440000000000015130664352016357 5ustar liggesuserstestthat/inst/include/testthat/vendor/0000755000176200001440000000000015047715224017656 5ustar liggesuserstestthat/inst/include/testthat/vendor/catch.h0000644000176200001440000146455315047715224021133 0ustar liggesusers/* * Catch v1.9.6 * Generated: 2017-06-27 12:19:54.557875 * ---------------------------------------------------------- * This file has been merged from multiple headers. Please don't edit it directly * Copyright (c) 2012 Two Blue Cubes Ltd. All rights reserved. * * Distributed under the Boost Software License, Version 1.0. 
(See accompanying * file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) */ #ifndef TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED #define TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED #define TWOBLUECUBES_CATCH_HPP_INCLUDED #ifdef __clang__ # pragma clang system_header #elif defined __GNUC__ # pragma GCC system_header #endif // #included from: internal/catch_suppress_warnings.h #ifdef __clang__ # ifdef __ICC // icpc defines the __clang__ macro # pragma warning(push) # pragma warning(disable: 161 1682) # else // __ICC # pragma clang diagnostic ignored "-Wglobal-constructors" # pragma clang diagnostic ignored "-Wvariadic-macros" # pragma clang diagnostic ignored "-Wc99-extensions" # pragma clang diagnostic ignored "-Wunused-variable" # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" # pragma clang diagnostic ignored "-Wc++98-compat" # pragma clang diagnostic ignored "-Wc++98-compat-pedantic" # pragma clang diagnostic ignored "-Wswitch-enum" # pragma clang diagnostic ignored "-Wcovered-switch-default" # endif #elif defined __GNUC__ # pragma GCC diagnostic ignored "-Wvariadic-macros" # pragma GCC diagnostic ignored "-Wunused-variable" # pragma GCC diagnostic ignored "-Wparentheses" # pragma GCC diagnostic push # pragma GCC diagnostic ignored "-Wpadded" #endif #if defined(CATCH_CONFIG_MAIN) || defined(CATCH_CONFIG_RUNNER) # define CATCH_IMPL #endif #ifdef CATCH_IMPL # ifndef CLARA_CONFIG_MAIN # define CLARA_CONFIG_MAIN_NOT_DEFINED # define CLARA_CONFIG_MAIN # endif #endif // #included from: internal/catch_notimplemented_exception.h #define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_H_INCLUDED // #included from: catch_common.h #define TWOBLUECUBES_CATCH_COMMON_H_INCLUDED // #included from: catch_compiler_capabilities.h #define TWOBLUECUBES_CATCH_COMPILER_CAPABILITIES_HPP_INCLUDED // Detect a number of compiler features - mostly C++11/14 conformance - by compiler // The following features are defined: // // 
CATCH_CONFIG_CPP11_NULLPTR : is nullptr supported? // CATCH_CONFIG_CPP11_NOEXCEPT : is noexcept supported? // CATCH_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods // CATCH_CONFIG_CPP11_IS_ENUM : std::is_enum is supported? // CATCH_CONFIG_CPP11_TUPLE : std::tuple is supported // CATCH_CONFIG_CPP11_LONG_LONG : is long long supported? // CATCH_CONFIG_CPP11_OVERRIDE : is override supported? // CATCH_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr) // CATCH_CONFIG_CPP11_SHUFFLE : is std::shuffle supported? // CATCH_CONFIG_CPP11_TYPE_TRAITS : are type_traits and enable_if supported? // CATCH_CONFIG_CPP11_OR_GREATER : Is C++11 supported? // CATCH_CONFIG_VARIADIC_MACROS : are variadic macros supported? // CATCH_CONFIG_COUNTER : is the __COUNTER__ macro supported? // CATCH_CONFIG_WINDOWS_SEH : is Windows SEH supported? // CATCH_CONFIG_POSIX_SIGNALS : are POSIX signals supported? // **************** // Note to maintainers: if new toggles are added please document them // in configuration.md, too // **************** // In general each macro has a _NO_ form // (e.g. CATCH_CONFIG_CPP11_NO_NULLPTR) which disables the feature. // Many features, at point of detection, define an _INTERNAL_ macro, so they // can be combined, en-mass, with the _NO_ forms later. 
// All the C++11 features can be disabled with CATCH_CONFIG_NO_CPP11 #ifdef __cplusplus # if __cplusplus >= 201103L # define CATCH_CPP11_OR_GREATER # endif # if __cplusplus >= 201402L # define CATCH_CPP14_OR_GREATER # endif #endif #ifdef __clang__ # if __has_feature(cxx_nullptr) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif # if __has_feature(cxx_noexcept) # define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # endif # if defined(CATCH_CPP11_OR_GREATER) # define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ _Pragma( "clang diagnostic push" ) \ _Pragma( "clang diagnostic ignored \"-Wexit-time-destructors\"" ) # define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ _Pragma( "clang diagnostic pop" ) # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ _Pragma( "clang diagnostic push" ) \ _Pragma( "clang diagnostic ignored \"-Wparentheses\"" ) # define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ _Pragma( "clang diagnostic pop" ) # endif #endif // __clang__ //////////////////////////////////////////////////////////////////////////////// // We know some environments not to support full POSIX signals #if defined(__CYGWIN__) || defined(__QNX__) # if !defined(CATCH_CONFIG_POSIX_SIGNALS) # define CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS # endif #endif //////////////////////////////////////////////////////////////////////////////// // Cygwin #ifdef __CYGWIN__ // Required for some versions of Cygwin to declare gettimeofday // see: http://stackoverflow.com/questions/36901803/gettimeofday-not-declared-in-this-scope-cygwin # define _BSD_SOURCE #endif // __CYGWIN__ //////////////////////////////////////////////////////////////////////////////// // Borland #ifdef __BORLANDC__ #endif // __BORLANDC__ //////////////////////////////////////////////////////////////////////////////// // EDG #ifdef __EDG_VERSION__ #endif // __EDG_VERSION__ //////////////////////////////////////////////////////////////////////////////// // Digital Mars #ifdef __DMC__ #endif // __DMC__ 
//////////////////////////////////////////////////////////////////////////////// // GCC #ifdef __GNUC__ # if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif // - otherwise more recent versions define __cplusplus >= 201103L // and will get picked up below #endif // __GNUC__ //////////////////////////////////////////////////////////////////////////////// // Visual C++ #ifdef _MSC_VER #define CATCH_INTERNAL_CONFIG_WINDOWS_SEH #if (_MSC_VER >= 1600) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR #endif #if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015)) #define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT #define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE #define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS #endif #endif // _MSC_VER //////////////////////////////////////////////////////////////////////////////// // Use variadic macros if the compiler supports them #if ( defined _MSC_VER && _MSC_VER > 1400 && !defined __EDGE__) || \ ( defined __WAVE__ && __WAVE_HAS_VARIADICS ) || \ ( defined __GNUC__ && __GNUC__ >= 3 ) || \ ( !defined __cplusplus && __STDC_VERSION__ >= 199901L || __cplusplus >= 201103L ) #define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS #endif // Use __COUNTER__ if the compiler supports it #if ( defined _MSC_VER && _MSC_VER >= 1300 ) || \ ( defined __GNUC__ && ( __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3 )) ) || \ ( defined __clang__ && __clang_major__ >= 3 ) #define CATCH_INTERNAL_CONFIG_COUNTER #endif //////////////////////////////////////////////////////////////////////////////// // C++ language feature support // catch all support for C++11 #if defined(CATCH_CPP11_OR_GREATER) # if !defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) # define CATCH_INTERNAL_CONFIG_CPP11_NULLPTR # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # define CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT # endif # ifndef 
CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS # define CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM # define CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM # endif # ifndef CATCH_INTERNAL_CONFIG_CPP11_TUPLE # define CATCH_INTERNAL_CONFIG_CPP11_TUPLE # endif # ifndef CATCH_INTERNAL_CONFIG_VARIADIC_MACROS # define CATCH_INTERNAL_CONFIG_VARIADIC_MACROS # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) # define CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) # define CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) # define CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) # define CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE # endif # if !defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) # define CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS # endif #endif // __cplusplus >= 201103L // Now set the actual defines based on the above + anything the user has configured #if defined(CATCH_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NO_NULLPTR) && !defined(CATCH_CONFIG_CPP11_NULLPTR) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_NULLPTR #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_NOEXCEPT #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CATCH_CONFIG_CPP11_GENERATED_METHODS) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_GENERATED_METHODS #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_NO_IS_ENUM) && !defined(CATCH_CONFIG_CPP11_IS_ENUM) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_IS_ENUM #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_TUPLE) && 
!defined(CATCH_CONFIG_CPP11_NO_TUPLE) && !defined(CATCH_CONFIG_CPP11_TUPLE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_TUPLE #endif #if defined(CATCH_INTERNAL_CONFIG_VARIADIC_MACROS) && !defined(CATCH_CONFIG_NO_VARIADIC_MACROS) && !defined(CATCH_CONFIG_VARIADIC_MACROS) # define CATCH_CONFIG_VARIADIC_MACROS #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_NO_LONG_LONG) && !defined(CATCH_CONFIG_CPP11_LONG_LONG) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_LONG_LONG #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_NO_OVERRIDE) && !defined(CATCH_CONFIG_CPP11_OVERRIDE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_OVERRIDE #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_NO_UNIQUE_PTR) && !defined(CATCH_CONFIG_CPP11_UNIQUE_PTR) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_UNIQUE_PTR #endif // Use of __COUNTER__ is suppressed if __JETBRAINS_IDE__ is #defined (meaning we're being parsed by a JetBrains IDE for // analytics) because, at time of writing, __COUNTER__ is not properly handled by it. 
// This does not affect compilation #if defined(CATCH_INTERNAL_CONFIG_COUNTER) && !defined(CATCH_CONFIG_NO_COUNTER) && !defined(CATCH_CONFIG_COUNTER) && !defined(__JETBRAINS_IDE__) # define CATCH_CONFIG_COUNTER #endif #if defined(CATCH_INTERNAL_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_NO_SHUFFLE) && !defined(CATCH_CONFIG_CPP11_SHUFFLE) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_SHUFFLE #endif # if defined(CATCH_INTERNAL_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_NO_TYPE_TRAITS) && !defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) && !defined(CATCH_CONFIG_NO_CPP11) # define CATCH_CONFIG_CPP11_TYPE_TRAITS # endif #if defined(CATCH_INTERNAL_CONFIG_WINDOWS_SEH) && !defined(CATCH_CONFIG_NO_WINDOWS_SEH) && !defined(CATCH_CONFIG_WINDOWS_SEH) # define CATCH_CONFIG_WINDOWS_SEH #endif // This is set by default, because we assume that unix compilers are posix-signal-compatible by default. #if !defined(CATCH_INTERNAL_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_NO_POSIX_SIGNALS) && !defined(CATCH_CONFIG_POSIX_SIGNALS) # define CATCH_CONFIG_POSIX_SIGNALS #endif #if !defined(CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS) # define CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS # define CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS #endif #if !defined(CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS) # define CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS # define CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #endif // noexcept support: #if defined(CATCH_CONFIG_CPP11_NOEXCEPT) && !defined(CATCH_NOEXCEPT) # define CATCH_NOEXCEPT noexcept # define CATCH_NOEXCEPT_IS(x) noexcept(x) #else # define CATCH_NOEXCEPT throw() # define CATCH_NOEXCEPT_IS(x) #endif // nullptr support #ifdef CATCH_CONFIG_CPP11_NULLPTR # define CATCH_NULL nullptr #else # define CATCH_NULL NULL #endif // override support #ifdef CATCH_CONFIG_CPP11_OVERRIDE # define CATCH_OVERRIDE override #else # define CATCH_OVERRIDE #endif // unique_ptr support #ifdef CATCH_CONFIG_CPP11_UNIQUE_PTR # define 
CATCH_AUTO_PTR( T ) std::unique_ptr #else # define CATCH_AUTO_PTR( T ) std::auto_ptr #endif #define INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) name##line #define INTERNAL_CATCH_UNIQUE_NAME_LINE( name, line ) INTERNAL_CATCH_UNIQUE_NAME_LINE2( name, line ) #ifdef CATCH_CONFIG_COUNTER # define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __COUNTER__ ) #else # define INTERNAL_CATCH_UNIQUE_NAME( name ) INTERNAL_CATCH_UNIQUE_NAME_LINE( name, __LINE__ ) #endif #define INTERNAL_CATCH_STRINGIFY2( expr ) #expr #define INTERNAL_CATCH_STRINGIFY( expr ) INTERNAL_CATCH_STRINGIFY2( expr ) #include #include namespace Catch { inline bool HasUncaughtException() { #if __cplusplus >= 202002L return std::uncaught_exceptions() > 0; #else return std::uncaught_exception(); #endif } struct IConfig; struct CaseSensitive { enum Choice { Yes, No }; }; class NonCopyable { #ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS NonCopyable( NonCopyable const& ) = delete; NonCopyable( NonCopyable && ) = delete; NonCopyable& operator = ( NonCopyable const& ) = delete; NonCopyable& operator = ( NonCopyable && ) = delete; #else NonCopyable( NonCopyable const& info ); NonCopyable& operator = ( NonCopyable const& ); #endif protected: NonCopyable() {} virtual ~NonCopyable(); }; class SafeBool { public: typedef void (SafeBool::*type)() const; static type makeSafe( bool value ) { return value ? 
&SafeBool::trueValue : 0; } private: void trueValue() const {} }; template inline void deleteAll( ContainerT& container ) { typename ContainerT::const_iterator it = container.begin(); typename ContainerT::const_iterator itEnd = container.end(); for(; it != itEnd; ++it ) delete *it; } template inline void deleteAllValues( AssociativeContainerT& container ) { typename AssociativeContainerT::const_iterator it = container.begin(); typename AssociativeContainerT::const_iterator itEnd = container.end(); for(; it != itEnd; ++it ) delete it->second; } bool startsWith( std::string const& s, std::string const& prefix ); bool startsWith( std::string const& s, char prefix ); bool endsWith( std::string const& s, std::string const& suffix ); bool endsWith( std::string const& s, char suffix ); bool contains( std::string const& s, std::string const& infix ); void toLowerInPlace( std::string& s ); std::string toLower( std::string const& s ); std::string trim( std::string const& str ); bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ); struct pluralise { pluralise( std::size_t count, std::string const& label ); friend std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ); std::size_t m_count; std::string m_label; }; struct SourceLineInfo { SourceLineInfo(); SourceLineInfo( char const* _file, std::size_t _line ); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS SourceLineInfo(SourceLineInfo const& other) = default; SourceLineInfo( SourceLineInfo && ) = default; SourceLineInfo& operator = ( SourceLineInfo const& ) = default; SourceLineInfo& operator = ( SourceLineInfo && ) = default; # endif bool empty() const; bool operator == ( SourceLineInfo const& other ) const; bool operator < ( SourceLineInfo const& other ) const; char const* file; std::size_t line; }; std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ); // This is just here to avoid compiler warnings with macro constants and boolean 
literals inline bool isTrue( bool value ){ return value; } inline bool alwaysTrue() { return true; } inline bool alwaysFalse() { return false; } void throwLogicError( std::string const& message, SourceLineInfo const& locationInfo ); void seedRng( IConfig const& config ); unsigned int rngSeed(); // Use this in variadic streaming macros to allow // >> +StreamEndStop // as well as // >> stuff +StreamEndStop struct StreamEndStop { std::string operator+() { return std::string(); } }; template T const& operator + ( T const& value, StreamEndStop ) { return value; } } #define CATCH_INTERNAL_LINEINFO ::Catch::SourceLineInfo( __FILE__, static_cast( __LINE__ ) ) #define CATCH_INTERNAL_ERROR( msg ) ::Catch::throwLogicError( msg, CATCH_INTERNAL_LINEINFO ); namespace Catch { class NotImplementedException : public std::exception { public: NotImplementedException( SourceLineInfo const& lineInfo ); NotImplementedException( NotImplementedException const& ) {} virtual ~NotImplementedException() CATCH_NOEXCEPT {} virtual const char* what() const CATCH_NOEXCEPT; private: std::string m_what; SourceLineInfo m_lineInfo; }; } // end namespace Catch /////////////////////////////////////////////////////////////////////////////// #define CATCH_NOT_IMPLEMENTED throw Catch::NotImplementedException( CATCH_INTERNAL_LINEINFO ) // #included from: internal/catch_context.h #define TWOBLUECUBES_CATCH_CONTEXT_H_INCLUDED // #included from: catch_interfaces_generators.h #define TWOBLUECUBES_CATCH_INTERFACES_GENERATORS_H_INCLUDED #include namespace Catch { struct IGeneratorInfo { virtual ~IGeneratorInfo(); virtual bool moveNext() = 0; virtual std::size_t getCurrentIndex() const = 0; }; struct IGeneratorsForTest { virtual ~IGeneratorsForTest(); virtual IGeneratorInfo& getGeneratorInfo( std::string const& fileInfo, std::size_t size ) = 0; virtual bool moveNext() = 0; }; IGeneratorsForTest* createGeneratorsForTest(); } // end namespace Catch // #included from: catch_ptr.hpp #define 
TWOBLUECUBES_CATCH_PTR_HPP_INCLUDED #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" #endif namespace Catch { // An intrusive reference counting smart pointer. // T must implement addRef() and release() methods // typically implementing the IShared interface template class Ptr { public: Ptr() : m_p( CATCH_NULL ){} Ptr( T* p ) : m_p( p ){ if( m_p ) m_p->addRef(); } Ptr( Ptr const& other ) : m_p( other.m_p ){ if( m_p ) m_p->addRef(); } ~Ptr(){ if( m_p ) m_p->release(); } void reset() { if( m_p ) m_p->release(); m_p = CATCH_NULL; } Ptr& operator = ( T* p ){ Ptr temp( p ); swap( temp ); return *this; } Ptr& operator = ( Ptr const& other ){ Ptr temp( other ); swap( temp ); return *this; } void swap( Ptr& other ) { std::swap( m_p, other.m_p ); } T* get() const{ return m_p; } T& operator*() const { return *m_p; } T* operator->() const { return m_p; } bool operator !() const { return m_p == CATCH_NULL; } operator SafeBool::type() const { return SafeBool::makeSafe( m_p != CATCH_NULL ); } private: T* m_p; }; struct IShared : NonCopyable { virtual ~IShared(); virtual void addRef() const = 0; virtual void release() const = 0; }; template struct SharedImpl : T { SharedImpl() : m_rc( 0 ){} virtual void addRef() const { ++m_rc; } virtual void release() const { if( --m_rc == 0 ) delete this; } mutable unsigned int m_rc; }; } // end namespace Catch #ifdef __clang__ # pragma clang diagnostic pop #endif namespace Catch { class TestCase; class Stream; struct IResultCapture; struct IRunner; struct IGeneratorsForTest; struct IConfig; struct IContext { virtual ~IContext(); virtual IResultCapture* getResultCapture() = 0; virtual IRunner* getRunner() = 0; virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) = 0; virtual bool advanceGeneratorsForCurrentTest() = 0; virtual Ptr getConfig() const = 0; }; struct IMutableContext : IContext { virtual ~IMutableContext(); virtual void setResultCapture( IResultCapture* 
resultCapture ) = 0; virtual void setRunner( IRunner* runner ) = 0; virtual void setConfig( Ptr const& config ) = 0; }; IContext& getCurrentContext(); IMutableContext& getCurrentMutableContext(); void cleanUpContext(); Stream createStream( std::string const& streamName ); } // #included from: internal/catch_test_registry.hpp #define TWOBLUECUBES_CATCH_TEST_REGISTRY_HPP_INCLUDED // #included from: catch_interfaces_testcase.h #define TWOBLUECUBES_CATCH_INTERFACES_TESTCASE_H_INCLUDED #include namespace Catch { class TestSpec; struct ITestCase : IShared { virtual void invoke () const = 0; protected: virtual ~ITestCase(); }; class TestCase; struct IConfig; struct ITestCaseRegistry { virtual ~ITestCaseRegistry(); virtual std::vector const& getAllTests() const = 0; virtual std::vector const& getAllTestsSorted( IConfig const& config ) const = 0; }; bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ); std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, IConfig const& config ); std::vector const& getAllTestCasesSorted( IConfig const& config ); } namespace Catch { template class MethodTestCase : public SharedImpl { public: MethodTestCase( void (C::*method)() ) : m_method( method ) {} virtual void invoke() const { C obj; (obj.*m_method)(); } private: virtual ~MethodTestCase() {} void (C::*m_method)(); }; typedef void(*TestFunction)(); struct NameAndDesc { NameAndDesc( const char* _name = "", const char* _description= "" ) : name( _name ), description( _description ) {} const char* name; const char* description; }; void registerTestCase ( ITestCase* testCase, char const* className, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ); struct AutoReg { AutoReg ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ); template AutoReg ( void (C::*method)(), char const* className, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ) { registerTestCase ( new 
MethodTestCase( method ), className, nameAndDesc, lineInfo ); } ~AutoReg(); private: AutoReg( AutoReg const& ); void operator= ( AutoReg const& ); }; void registerTestCaseFunction ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ); } // end namespace Catch #ifdef CATCH_CONFIG_VARIADIC_MACROS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TESTCASE2( TestName, ... ) \ static void TestName(); \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ static void TestName() #define INTERNAL_CATCH_TESTCASE( ... ) \ INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), __VA_ARGS__ ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, ... ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestName, ClassName, ... )\ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ \ struct TestName : ClassName{ \ void test(); \ }; \ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestName::test, #ClassName, Catch::NameAndDesc( __VA_ARGS__ ), CATCH_INTERNAL_LINEINFO ); \ } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ void TestName::test() #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, ... 
) \ INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, __VA_ARGS__ ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, ... ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( __VA_ARGS__ ) ); \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #else /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TESTCASE2( TestName, Name, Desc ) \ static void TestName(); \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &TestName, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); }\ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ static void TestName() #define INTERNAL_CATCH_TESTCASE( Name, Desc ) \ INTERNAL_CATCH_TESTCASE2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), Name, Desc ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_METHOD_AS_TEST_CASE( QualifiedMethod, Name, Desc ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar )( &QualifiedMethod, "&" #QualifiedMethod, Catch::NameAndDesc( Name, Desc ), CATCH_INTERNAL_LINEINFO ); } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST_CASE_METHOD2( TestCaseName, ClassName, TestName, Desc )\ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ namespace{ \ struct TestCaseName : ClassName{ \ void test(); \ }; \ Catch::AutoReg INTERNAL_CATCH_UNIQUE_NAME( autoRegistrar ) ( &TestCaseName::test, #ClassName, Catch::NameAndDesc( TestName, Desc ), CATCH_INTERNAL_LINEINFO ); \ } \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS \ void TestCaseName::test() #define INTERNAL_CATCH_TEST_CASE_METHOD( ClassName, TestName, Desc )\ 
INTERNAL_CATCH_TEST_CASE_METHOD2( INTERNAL_CATCH_UNIQUE_NAME( ____C_A_T_C_H____T_E_S_T____ ), ClassName, TestName, Desc ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_REGISTER_TESTCASE( Function, Name, Desc ) \ CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS \ Catch::AutoReg( Function, CATCH_INTERNAL_LINEINFO, Catch::NameAndDesc( Name, Desc ) ); \ CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS #endif // #included from: internal/catch_capture.hpp #define TWOBLUECUBES_CATCH_CAPTURE_HPP_INCLUDED // #included from: catch_result_builder.h #define TWOBLUECUBES_CATCH_RESULT_BUILDER_H_INCLUDED // #included from: catch_result_type.h #define TWOBLUECUBES_CATCH_RESULT_TYPE_H_INCLUDED namespace Catch { // ResultWas::OfType enum struct ResultWas { enum OfType { Unknown = -1, Ok = 0, Info = 1, Warning = 2, FailureBit = 0x10, ExpressionFailed = FailureBit | 1, ExplicitFailure = FailureBit | 2, Exception = 0x100 | FailureBit, ThrewException = Exception | 1, DidntThrowException = Exception | 2, FatalErrorCondition = 0x200 | FailureBit }; }; inline bool isOk( ResultWas::OfType resultType ) { return ( resultType & ResultWas::FailureBit ) == 0; } inline bool isJustInfo( int flags ) { return flags == ResultWas::Info; } // ResultDisposition::Flags enum struct ResultDisposition { enum Flags { Normal = 0x01, ContinueOnFailure = 0x02, // Failures fail test, but execution continues FalseTest = 0x04, // Prefix expression with ! 
SuppressFail = 0x08 // Failures are reported but do not fail the test }; }; inline ResultDisposition::Flags operator | ( ResultDisposition::Flags lhs, ResultDisposition::Flags rhs ) { return static_cast( static_cast( lhs ) | static_cast( rhs ) ); } inline bool shouldContinueOnFailure( int flags ) { return ( flags & ResultDisposition::ContinueOnFailure ) != 0; } inline bool isFalseTest( int flags ) { return ( flags & ResultDisposition::FalseTest ) != 0; } inline bool shouldSuppressFailure( int flags ) { return ( flags & ResultDisposition::SuppressFail ) != 0; } } // end namespace Catch // #included from: catch_assertionresult.h #define TWOBLUECUBES_CATCH_ASSERTIONRESULT_H_INCLUDED #include namespace Catch { struct STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison; struct DecomposedExpression { virtual ~DecomposedExpression() {} virtual bool isBinaryExpression() const { return false; } virtual void reconstructExpression( std::string& dest ) const = 0; // Only simple binary comparisons can be decomposed. // If more complex check is required then wrap sub-expressions in parentheses. 
template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator + ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator - ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator * ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator / ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator % ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator && ( T const& ); template STATIC_ASSERT_Expression_Too_Complex_Please_Rewrite_As_Binary_Comparison& operator || ( T const& ); private: DecomposedExpression& operator = (DecomposedExpression const&); }; struct AssertionInfo { AssertionInfo() {} AssertionInfo( char const * _macroName, SourceLineInfo const& _lineInfo, char const * _capturedExpression, ResultDisposition::Flags _resultDisposition, char const * _secondArg = ""); char const * macroName; SourceLineInfo lineInfo; char const * capturedExpression; ResultDisposition::Flags resultDisposition; char const * secondArg; }; struct AssertionResultData { AssertionResultData() : decomposedExpression( CATCH_NULL ) , resultType( ResultWas::Unknown ) , negated( false ) , parenthesized( false ) {} void negate( bool parenthesize ) { negated = !negated; parenthesized = parenthesize; if( resultType == ResultWas::Ok ) resultType = ResultWas::ExpressionFailed; else if( resultType == ResultWas::ExpressionFailed ) resultType = ResultWas::Ok; } std::string const& reconstructExpression() const { if( decomposedExpression != CATCH_NULL ) { decomposedExpression->reconstructExpression( reconstructedExpression ); if( parenthesized ) { reconstructedExpression.insert( 0, 1, '(' ); reconstructedExpression.append( 1, ')' ); } if( negated ) { reconstructedExpression.insert( 0, 1, '!' 
); } decomposedExpression = CATCH_NULL; } return reconstructedExpression; } mutable DecomposedExpression const* decomposedExpression; mutable std::string reconstructedExpression; std::string message; ResultWas::OfType resultType; bool negated; bool parenthesized; }; class AssertionResult { public: AssertionResult(); AssertionResult( AssertionInfo const& info, AssertionResultData const& data ); ~AssertionResult(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS AssertionResult( AssertionResult const& ) = default; AssertionResult( AssertionResult && ) = default; AssertionResult& operator = ( AssertionResult const& ) = default; AssertionResult& operator = ( AssertionResult && ) = default; # endif bool isOk() const; bool succeeded() const; ResultWas::OfType getResultType() const; bool hasExpression() const; bool hasMessage() const; std::string getExpression() const; std::string getExpressionInMacro() const; bool hasExpandedExpression() const; std::string getExpandedExpression() const; std::string getMessage() const; SourceLineInfo getSourceInfo() const; std::string getTestMacroName() const; void discardDecomposedExpression() const; void expandDecomposedExpression() const; protected: AssertionInfo m_info; AssertionResultData m_resultData; }; } // end namespace Catch // #included from: catch_matchers.hpp #define TWOBLUECUBES_CATCH_MATCHERS_HPP_INCLUDED namespace Catch { namespace Matchers { namespace Impl { template struct MatchAllOf; template struct MatchAnyOf; template struct MatchNotOf; class MatcherUntypedBase { public: std::string toString() const { if( m_cachedToString.empty() ) m_cachedToString = describe(); return m_cachedToString; } protected: virtual ~MatcherUntypedBase(); virtual std::string describe() const = 0; mutable std::string m_cachedToString; private: MatcherUntypedBase& operator = ( MatcherUntypedBase const& ); }; template struct MatcherMethod { virtual bool match( ObjectT const& arg ) const = 0; }; template struct MatcherMethod { virtual bool match( 
PtrT* arg ) const = 0; }; template struct MatcherBase : MatcherUntypedBase, MatcherMethod { MatchAllOf operator && ( MatcherBase const& other ) const; MatchAnyOf operator || ( MatcherBase const& other ) const; MatchNotOf operator ! () const; }; template struct MatchAllOf : MatcherBase { virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if (!m_matchers[i]->match(arg)) return false; } return true; } virtual std::string describe() const CATCH_OVERRIDE { std::string description; description.reserve( 4 + m_matchers.size()*32 ); description += "( "; for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if( i != 0 ) description += " and "; description += m_matchers[i]->toString(); } description += " )"; return description; } MatchAllOf& operator && ( MatcherBase const& other ) { m_matchers.push_back( &other ); return *this; } std::vector const*> m_matchers; }; template struct MatchAnyOf : MatcherBase { virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if (m_matchers[i]->match(arg)) return true; } return false; } virtual std::string describe() const CATCH_OVERRIDE { std::string description; description.reserve( 4 + m_matchers.size()*32 ); description += "( "; for( std::size_t i = 0; i < m_matchers.size(); ++i ) { if( i != 0 ) description += " or "; description += m_matchers[i]->toString(); } description += " )"; return description; } MatchAnyOf& operator || ( MatcherBase const& other ) { m_matchers.push_back( &other ); return *this; } std::vector const*> m_matchers; }; template struct MatchNotOf : MatcherBase { MatchNotOf( MatcherBase const& underlyingMatcher ) : m_underlyingMatcher( underlyingMatcher ) {} virtual bool match( ArgT const& arg ) const CATCH_OVERRIDE { return !m_underlyingMatcher.match( arg ); } virtual std::string describe() const CATCH_OVERRIDE { return "not " + m_underlyingMatcher.toString(); } MatcherBase const& 
m_underlyingMatcher; }; template MatchAllOf MatcherBase::operator && ( MatcherBase const& other ) const { return MatchAllOf() && *this && other; } template MatchAnyOf MatcherBase::operator || ( MatcherBase const& other ) const { return MatchAnyOf() || *this || other; } template MatchNotOf MatcherBase::operator ! () const { return MatchNotOf( *this ); } } // namespace Impl // The following functions create the actual matcher objects. // This allows the types to be inferred // - deprecated: prefer ||, && and ! template inline Impl::MatchNotOf Not( Impl::MatcherBase const& underlyingMatcher ) { return Impl::MatchNotOf( underlyingMatcher ); } template inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { return Impl::MatchAllOf() && m1 && m2; } template inline Impl::MatchAllOf AllOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { return Impl::MatchAllOf() && m1 && m2 && m3; } template inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2 ) { return Impl::MatchAnyOf() || m1 || m2; } template inline Impl::MatchAnyOf AnyOf( Impl::MatcherBase const& m1, Impl::MatcherBase const& m2, Impl::MatcherBase const& m3 ) { return Impl::MatchAnyOf() || m1 || m2 || m3; } } // namespace Matchers using namespace Matchers; using Matchers::Impl::MatcherBase; } // namespace Catch namespace Catch { struct TestFailureException{}; template class ExpressionLhs; struct CopyableStream { CopyableStream() {} CopyableStream( CopyableStream const& other ) { oss << other.oss.str(); } CopyableStream& operator=( CopyableStream const& other ) { oss.str(std::string()); oss << other.oss.str(); return *this; } std::ostringstream oss; }; class ResultBuilder : public DecomposedExpression { public: ResultBuilder( char const* macroName, SourceLineInfo const& lineInfo, char const* capturedExpression, ResultDisposition::Flags resultDisposition, char const* secondArg = "" ); ~ResultBuilder(); 
template ExpressionLhs operator <= ( T const& operand ); ExpressionLhs operator <= ( bool value ); template ResultBuilder& operator << ( T const& value ) { m_stream().oss << value; return *this; } ResultBuilder& setResultType( ResultWas::OfType result ); ResultBuilder& setResultType( bool result ); void endExpression( DecomposedExpression const& expr ); virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE; AssertionResult build() const; AssertionResult build( DecomposedExpression const& expr ) const; void useActiveException( ResultDisposition::Flags resultDisposition = ResultDisposition::Normal ); void captureResult( ResultWas::OfType resultType ); void captureExpression(); void captureExpectedException( std::string const& expectedMessage ); void captureExpectedException( Matchers::Impl::MatcherBase const& matcher ); void handleResult( AssertionResult const& result ); void react(); bool shouldDebugBreak() const; bool allowThrows() const; template void captureMatch( ArgT const& arg, MatcherT const& matcher, char const* matcherString ); void setExceptionGuard(); void unsetExceptionGuard(); private: AssertionInfo m_assertionInfo; AssertionResultData m_data; static CopyableStream &m_stream() { static CopyableStream s; return s; } bool m_shouldDebugBreak; bool m_shouldThrow; bool m_guardException; }; } // namespace Catch // Include after due to circular dependency: // #included from: catch_expression_lhs.hpp #define TWOBLUECUBES_CATCH_EXPRESSION_LHS_HPP_INCLUDED // #included from: catch_evaluate.hpp #define TWOBLUECUBES_CATCH_EVALUATE_HPP_INCLUDED #ifdef _MSC_VER # pragma warning(push) # pragma warning(disable:4389) // '==' : signed/unsigned mismatch # pragma warning(disable:4312) // Converting int to T* using reinterpret_cast (issue on x64 platform) #endif #include namespace Catch { namespace Internal { enum Operator { IsEqualTo, IsNotEqualTo, IsLessThan, IsGreaterThan, IsLessThanOrEqualTo, IsGreaterThanOrEqualTo }; template struct 
OperatorTraits { static const char* getName(){ return "*error*"; } }; template<> struct OperatorTraits { static const char* getName(){ return "=="; } }; template<> struct OperatorTraits { static const char* getName(){ return "!="; } }; template<> struct OperatorTraits { static const char* getName(){ return "<"; } }; template<> struct OperatorTraits { static const char* getName(){ return ">"; } }; template<> struct OperatorTraits { static const char* getName(){ return "<="; } }; template<> struct OperatorTraits{ static const char* getName(){ return ">="; } }; template inline T& opCast(T const& t) { return const_cast(t); } // nullptr_t support based on pull request #154 from Konstantin Baumann #ifdef CATCH_CONFIG_CPP11_NULLPTR inline std::nullptr_t opCast(std::nullptr_t) { return nullptr; } #endif // CATCH_CONFIG_CPP11_NULLPTR // So the compare overloads can be operator agnostic we convey the operator as a template // enum, which is used to specialise an Evaluator for doing the comparison. 
template class Evaluator{}; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs) { return bool( opCast( lhs ) == opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) != opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) < opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) > opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) >= opCast( rhs ) ); } }; template struct Evaluator { static bool evaluate( T1 const& lhs, T2 const& rhs ) { return bool( opCast( lhs ) <= opCast( rhs ) ); } }; template bool applyEvaluator( T1 const& lhs, T2 const& rhs ) { return Evaluator::evaluate( lhs, rhs ); } // This level of indirection allows us to specialise for integer types // to avoid signed/ unsigned warnings // "base" overload template bool compare( T1 const& lhs, T2 const& rhs ) { return Evaluator::evaluate( lhs, rhs ); } // unsigned X to int template bool compare( unsigned int lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned long lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned char lhs, int rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } // unsigned X to long template bool compare( unsigned int lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned long lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } template bool compare( unsigned char lhs, long rhs ) { return applyEvaluator( lhs, static_cast( rhs ) ); } // int to unsigned X template bool compare( int lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( int 
lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( int lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // long to unsigned X template bool compare( long lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // pointer to long (when comparing against NULL) template bool compare( long lhs, T* rhs ) { return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, long rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } // pointer to int (when comparing against NULL) template bool compare( int lhs, T* rhs ) { return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, int rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } #ifdef CATCH_CONFIG_CPP11_LONG_LONG // long long to unsigned X template bool compare( long long lhs, unsigned int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned long long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( long long lhs, unsigned char rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } // unsigned long long to X template bool compare( unsigned long long lhs, int rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, long long rhs ) { return applyEvaluator( static_cast( lhs ), rhs ); } template bool compare( unsigned long long lhs, char rhs ) { return 
applyEvaluator( static_cast( lhs ), rhs ); } // pointer to long long (when comparing against NULL) template bool compare( long long lhs, T* rhs ) { return Evaluator::evaluate( reinterpret_cast( lhs ), rhs ); } template bool compare( T* lhs, long long rhs ) { return Evaluator::evaluate( lhs, reinterpret_cast( rhs ) ); } #endif // CATCH_CONFIG_CPP11_LONG_LONG #ifdef CATCH_CONFIG_CPP11_NULLPTR // pointer to nullptr_t (when comparing against nullptr) template bool compare( std::nullptr_t, T* rhs ) { return Evaluator::evaluate( nullptr, rhs ); } template bool compare( T* lhs, std::nullptr_t ) { return Evaluator::evaluate( lhs, nullptr ); } #endif // CATCH_CONFIG_CPP11_NULLPTR } // end of namespace Internal } // end of namespace Catch #ifdef _MSC_VER # pragma warning(pop) #endif // #included from: catch_tostring.h #define TWOBLUECUBES_CATCH_TOSTRING_H_INCLUDED #include #include #include #include #include #ifdef __OBJC__ // #included from: catch_objc_arc.hpp #define TWOBLUECUBES_CATCH_OBJC_ARC_HPP_INCLUDED #import #ifdef __has_feature #define CATCH_ARC_ENABLED __has_feature(objc_arc) #else #define CATCH_ARC_ENABLED 0 #endif void arcSafeRelease( NSObject* obj ); id performOptionalSelector( id obj, SEL sel ); #if !CATCH_ARC_ENABLED inline void arcSafeRelease( NSObject* obj ) { [obj release]; } inline id performOptionalSelector( id obj, SEL sel ) { if( [obj respondsToSelector: sel] ) return [obj performSelector: sel]; return nil; } #define CATCH_UNSAFE_UNRETAINED #define CATCH_ARC_STRONG #else inline void arcSafeRelease( NSObject* ){} inline id performOptionalSelector( id obj, SEL sel ) { #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Warc-performSelector-leaks" #endif if( [obj respondsToSelector: sel] ) return [obj performSelector: sel]; #ifdef __clang__ # pragma clang diagnostic pop #endif return nil; } #define CATCH_UNSAFE_UNRETAINED __unsafe_unretained #define CATCH_ARC_STRONG __strong #endif #endif #ifdef CATCH_CONFIG_CPP11_TUPLE 
#include #endif #ifdef CATCH_CONFIG_CPP11_IS_ENUM #include #endif namespace Catch { // Why we're here. template std::string toString( T const& value ); // Built in overloads std::string toString( std::string const& value ); std::string toString( std::wstring const& value ); std::string toString( const char* const value ); std::string toString( char* const value ); std::string toString( const wchar_t* const value ); std::string toString( wchar_t* const value ); std::string toString( int value ); std::string toString( unsigned long value ); std::string toString( unsigned int value ); std::string toString( const double value ); std::string toString( const float value ); std::string toString( bool value ); std::string toString( char value ); std::string toString( signed char value ); std::string toString( unsigned char value ); #ifdef CATCH_CONFIG_CPP11_LONG_LONG std::string toString( long long value ); std::string toString( unsigned long long value ); #endif #ifdef CATCH_CONFIG_CPP11_NULLPTR std::string toString( std::nullptr_t ); #endif #ifdef __OBJC__ std::string toString( NSString const * const& nsstring ); std::string toString( NSString * CATCH_ARC_STRONG & nsstring ); std::string toString( NSObject* const& nsObject ); #endif namespace Detail { extern const std::string unprintableString; #if !defined(CATCH_CONFIG_CPP11_STREAM_INSERTABLE_CHECK) struct BorgType { template BorgType( T const& ); }; struct TrueType { char sizer[1]; }; struct FalseType { char sizer[2]; }; TrueType& testStreamable( std::ostream& ); FalseType testStreamable( FalseType ); FalseType operator<<( std::ostream const&, BorgType const& ); template struct IsStreamInsertable { static std::ostream &s; static T const&t; enum { value = sizeof( testStreamable(s << t) ) == sizeof( TrueType ) }; }; #else template class IsStreamInsertable { template static auto test(int) -> decltype( std::declval() << std::declval(), std::true_type() ); template static auto test(...) 
-> std::false_type; public: static const bool value = decltype(test(0))::value; }; #endif #if defined(CATCH_CONFIG_CPP11_IS_ENUM) template::value > struct EnumStringMaker { static std::string convert( T const& ) { return unprintableString; } }; template struct EnumStringMaker { static std::string convert( T const& v ) { return ::Catch::toString( static_cast::type>(v) ); } }; #endif template struct StringMakerBase { #if defined(CATCH_CONFIG_CPP11_IS_ENUM) template static std::string convert( T const& v ) { return EnumStringMaker::convert( v ); } #else template static std::string convert( T const& ) { return unprintableString; } #endif }; template<> struct StringMakerBase { template static std::string convert( T const& _value ) { std::ostringstream oss; oss << _value; return oss.str(); } }; std::string rawMemoryToString( const void *object, std::size_t size ); template inline std::string rawMemoryToString( const T& object ) { return rawMemoryToString( &object, sizeof(object) ); } } // end namespace Detail template struct StringMaker : Detail::StringMakerBase::value> {}; template struct StringMaker { template static std::string convert( U* p ) { if( !p ) return "NULL"; else return Detail::rawMemoryToString( p ); } }; template struct StringMaker { static std::string convert( R C::* p ) { if( !p ) return "NULL"; else return Detail::rawMemoryToString( p ); } }; namespace Detail { template std::string rangeToString( InputIterator first, InputIterator last ); } //template //struct StringMaker > { // static std::string convert( std::vector const& v ) { // return Detail::rangeToString( v.begin(), v.end() ); // } //}; template std::string toString( std::vector const& v ) { return Detail::rangeToString( v.begin(), v.end() ); } #ifdef CATCH_CONFIG_CPP11_TUPLE // toString for tuples namespace TupleDetail { template< typename Tuple, std::size_t N = 0, bool = (N < std::tuple_size::value) > struct ElementPrinter { static void print( const Tuple& tuple, std::ostream& os ) { os << ( 
N ? ", " : " " ) << Catch::toString(std::get(tuple)); ElementPrinter::print(tuple,os); } }; template< typename Tuple, std::size_t N > struct ElementPrinter { static void print( const Tuple&, std::ostream& ) {} }; } template struct StringMaker> { static std::string convert( const std::tuple& tuple ) { std::ostringstream os; os << '{'; TupleDetail::ElementPrinter>::print( tuple, os ); os << " }"; return os.str(); } }; #endif // CATCH_CONFIG_CPP11_TUPLE namespace Detail { template std::string makeString( T const& value ) { return StringMaker::convert( value ); } } // end namespace Detail /// \brief converts any type to a string /// /// The default template forwards on to ostringstream - except when an /// ostringstream overload does not exist - in which case it attempts to detect /// that and writes {?}. /// Overload (not specialise) this template for custom typs that you don't want /// to provide an ostream overload for. template std::string toString( T const& value ) { return StringMaker::convert( value ); } namespace Detail { template std::string rangeToString( InputIterator first, InputIterator last ) { std::ostringstream oss; oss << "{ "; if( first != last ) { oss << Catch::toString( *first ); for( ++first ; first != last ; ++first ) oss << ", " << Catch::toString( *first ); } oss << " }"; return oss.str(); } } } // end namespace Catch namespace Catch { template class BinaryExpression; template class MatchExpression; // Wraps the LHS of an expression and overloads comparison operators // for also capturing those and RHS (if any) template class ExpressionLhs : public DecomposedExpression { public: ExpressionLhs( ResultBuilder& rb, T lhs ) : m_rb( rb ), m_lhs( lhs ), m_truthy(false) {} ExpressionLhs& operator = ( const ExpressionLhs& ); template BinaryExpression operator == ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator != ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator < ( 
RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator > ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator <= ( RhsT const& rhs ) { return captureExpression( rhs ); } template BinaryExpression operator >= ( RhsT const& rhs ) { return captureExpression( rhs ); } BinaryExpression operator == ( bool rhs ) { return captureExpression( rhs ); } BinaryExpression operator != ( bool rhs ) { return captureExpression( rhs ); } void endExpression() { m_truthy = m_lhs ? true : false; m_rb .setResultType( m_truthy ) .endExpression( *this ); } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { dest = Catch::toString( m_lhs ); } private: template BinaryExpression captureExpression( RhsT& rhs ) const { return BinaryExpression( m_rb, m_lhs, rhs ); } template BinaryExpression captureExpression( bool rhs ) const { return BinaryExpression( m_rb, m_lhs, rhs ); } private: ResultBuilder& m_rb; T m_lhs; bool m_truthy; }; template class BinaryExpression : public DecomposedExpression { public: BinaryExpression( ResultBuilder& rb, LhsT lhs, RhsT rhs ) : m_rb( rb ), m_lhs( lhs ), m_rhs( rhs ) {} BinaryExpression& operator = ( BinaryExpression& ); void endExpression() const { m_rb .setResultType( Internal::compare( m_lhs, m_rhs ) ) .endExpression( *this ); } virtual bool isBinaryExpression() const CATCH_OVERRIDE { return true; } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { std::string lhs = Catch::toString( m_lhs ); std::string rhs = Catch::toString( m_rhs ); char delim = lhs.size() + rhs.size() < 40 && lhs.find('\n') == std::string::npos && rhs.find('\n') == std::string::npos ? 
' ' : '\n'; dest.reserve( 7 + lhs.size() + rhs.size() ); // 2 for spaces around operator // 2 for operator // 2 for parentheses (conditionally added later) // 1 for negation (conditionally added later) dest = lhs; dest += delim; dest += Internal::OperatorTraits::getName(); dest += delim; dest += rhs; } private: ResultBuilder& m_rb; LhsT m_lhs; RhsT m_rhs; }; template class MatchExpression : public DecomposedExpression { public: MatchExpression( ArgT arg, MatcherT matcher, char const* matcherString ) : m_arg( arg ), m_matcher( matcher ), m_matcherString( matcherString ) {} virtual bool isBinaryExpression() const CATCH_OVERRIDE { return true; } virtual void reconstructExpression( std::string& dest ) const CATCH_OVERRIDE { std::string matcherAsString = m_matcher.toString(); dest = Catch::toString( m_arg ); dest += ' '; if( matcherAsString == Detail::unprintableString ) dest += m_matcherString; else dest += matcherAsString; } private: ArgT m_arg; MatcherT m_matcher; char const* m_matcherString; }; } // end namespace Catch namespace Catch { template inline ExpressionLhs ResultBuilder::operator <= ( T const& operand ) { return ExpressionLhs( *this, operand ); } inline ExpressionLhs ResultBuilder::operator <= ( bool value ) { return ExpressionLhs( *this, value ); } template inline void ResultBuilder::captureMatch( ArgT const& arg, MatcherT const& matcher, char const* matcherString ) { MatchExpression expr( arg, matcher, matcherString ); setResultType( matcher.match( arg ) ); endExpression( expr ); } } // namespace Catch // #included from: catch_message.h #define TWOBLUECUBES_CATCH_MESSAGE_H_INCLUDED #include namespace Catch { struct MessageInfo { MessageInfo( std::string const& _macroName, SourceLineInfo const& _lineInfo, ResultWas::OfType _type ); std::string macroName; SourceLineInfo lineInfo; ResultWas::OfType type; std::string message; unsigned int sequence; bool operator == ( MessageInfo const& other ) const { return sequence == other.sequence; } bool operator < ( 
MessageInfo const& other ) const { return sequence < other.sequence; } private: static unsigned int globalCount; }; struct MessageBuilder { MessageBuilder( std::string const& macroName, SourceLineInfo const& lineInfo, ResultWas::OfType type ) : m_info( macroName, lineInfo, type ) {} template MessageBuilder& operator << ( T const& value ) { m_stream << value; return *this; } MessageInfo m_info; std::ostringstream m_stream; }; class ScopedMessage { public: ScopedMessage( MessageBuilder const& builder ); ScopedMessage( ScopedMessage const& other ); ~ScopedMessage(); MessageInfo m_info; }; } // end namespace Catch // #included from: catch_interfaces_capture.h #define TWOBLUECUBES_CATCH_INTERFACES_CAPTURE_H_INCLUDED #include namespace Catch { class TestCase; class AssertionResult; struct AssertionInfo; struct SectionInfo; struct SectionEndInfo; struct MessageInfo; class ScopedMessageBuilder; struct Counts; struct IResultCapture { virtual ~IResultCapture(); virtual void assertionEnded( AssertionResult const& result ) = 0; virtual bool sectionStarted( SectionInfo const& sectionInfo, Counts& assertions ) = 0; virtual void sectionEnded( SectionEndInfo const& endInfo ) = 0; virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) = 0; virtual void pushScopedMessage( MessageInfo const& message ) = 0; virtual void popScopedMessage( MessageInfo const& message ) = 0; virtual std::string getCurrentTestName() const = 0; virtual const AssertionResult* getLastResult() const = 0; virtual void exceptionEarlyReported() = 0; virtual void handleFatalErrorCondition( std::string const& message ) = 0; }; IResultCapture& getResultCapture(); } // #included from: catch_debugger.h #define TWOBLUECUBES_CATCH_DEBUGGER_H_INCLUDED // #included from: catch_platform.h #define TWOBLUECUBES_CATCH_PLATFORM_H_INCLUDED #if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) # define CATCH_PLATFORM_MAC #elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED) # define CATCH_PLATFORM_IPHONE #elif defined(linux) || 
defined(__linux) || defined(__linux__) # define CATCH_PLATFORM_LINUX #elif defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) # define CATCH_PLATFORM_WINDOWS # if !defined(NOMINMAX) && !defined(CATCH_CONFIG_NO_NOMINMAX) # define CATCH_DEFINES_NOMINMAX # endif # if !defined(WIN32_LEAN_AND_MEAN) && !defined(CATCH_CONFIG_NO_WIN32_LEAN_AND_MEAN) # define CATCH_DEFINES_WIN32_LEAN_AND_MEAN # endif #endif #include namespace Catch{ bool isDebuggerActive(); void writeToDebugConsole( std::string const& text ); } #ifdef CATCH_PLATFORM_MAC // The following code snippet based on: // http://cocoawithlove.com/2008/03/break-into-debugger.html #if defined(__ppc64__) || defined(__ppc__) #define CATCH_TRAP() \ __asm__("li r0, 20\nsc\nnop\nli r0, 37\nli r4, 2\nsc\nnop\n" \ : : : "memory","r0","r3","r4" ) // backported from Catch2 // revision b9853b4b356b83bb580c746c3a1f11101f9af54f // src/catch2/internal/catch_debugger.hpp #elif defined(__i386__) || defined(__x86_64__) #define CATCH_TRAP() __asm__("int $3\n" : : ) /* NOLINT */ #elif defined(__aarch64__) #define CATCH_TRAP() __asm__(".inst 0xd4200000") #endif #elif defined(CATCH_PLATFORM_LINUX) // If we can use inline assembler, do it because this allows us to break // directly at the location of the failing check instead of breaking inside // raise() called from it, i.e. one stack frame below. #if defined(__GNUC__) && (defined(__i386) || defined(__x86_64)) #define CATCH_TRAP() asm volatile ("int $3") #else // Fall back to the generic way. 
#include #define CATCH_TRAP() raise(SIGTRAP) #endif #elif defined(_MSC_VER) #define CATCH_TRAP() __debugbreak() #elif defined(__MINGW32__) extern "C" __declspec(dllimport) void __stdcall DebugBreak(); #define CATCH_TRAP() DebugBreak() #endif #ifdef CATCH_TRAP #define CATCH_BREAK_INTO_DEBUGGER() if( Catch::isDebuggerActive() ) { CATCH_TRAP(); } #else #define CATCH_BREAK_INTO_DEBUGGER() Catch::alwaysTrue(); #endif // #included from: catch_interfaces_runner.h #define TWOBLUECUBES_CATCH_INTERFACES_RUNNER_H_INCLUDED namespace Catch { class TestCase; struct IRunner { virtual ~IRunner(); virtual bool aborting() const = 0; }; } #if defined(CATCH_CONFIG_FAST_COMPILE) /////////////////////////////////////////////////////////////////////////////// // We can speedup compilation significantly by breaking into debugger lower in // the callstack, because then we don't have to expand CATCH_BREAK_INTO_DEBUGGER // macro in each assertion #define INTERNAL_CATCH_REACT( resultBuilder ) \ resultBuilder.react(); /////////////////////////////////////////////////////////////////////////////// // Another way to speed-up compilation is to omit local try-catch for REQUIRE* // macros. // This can potentially cause false negative, if the test code catches // the exception before it propagates back up to the runner. 
#define INTERNAL_CATCH_TEST_NO_TRY( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ __catchResult.setExceptionGuard(); \ CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ ( __catchResult <= expr ).endExpression(); \ CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ __catchResult.unsetExceptionGuard(); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look // The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. #define INTERNAL_CHECK_THAT_NO_TRY( macroName, matcher, resultDisposition, arg ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ __catchResult.setExceptionGuard(); \ __catchResult.captureMatch( arg, matcher, #matcher ); \ __catchResult.unsetExceptionGuard(); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #else /////////////////////////////////////////////////////////////////////////////// // In the event of a failure works out if the debugger needs to be invoked // and/or an exception thrown and takes appropriate action. 
// This needs to be done as a macro so the debugger will stop in the user // source code rather than in Catch library code #define INTERNAL_CATCH_REACT( resultBuilder ) \ if( resultBuilder.shouldDebugBreak() ) CATCH_BREAK_INTO_DEBUGGER(); \ resultBuilder.react(); #endif /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ try { \ CATCH_INTERNAL_SUPPRESS_PARENTHESES_WARNINGS \ ( __catchResult <= expr ).endExpression(); \ CATCH_INTERNAL_UNSUPPRESS_PARENTHESES_WARNINGS \ } \ catch( ... ) { \ __catchResult.useActiveException( resultDisposition ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::isTrue( false && static_cast( !!(expr) ) ) ) // expr here is never evaluated at runtime but it forces the compiler to give it a look // The double negation silences MSVC's C4800 warning, the static_cast forces short-circuit evaluation if the type has overloaded &&. /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_IF( macroName, resultDisposition, expr ) \ INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ if( Catch::getResultCapture().getLastResult()->succeeded() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_ELSE( macroName, resultDisposition, expr ) \ INTERNAL_CATCH_TEST( macroName, resultDisposition, expr ); \ if( !Catch::getResultCapture().getLastResult()->succeeded() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_NO_THROW( macroName, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition ); \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ } \ catch( ... 
) { \ __catchResult.useActiveException( resultDisposition ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_THROWS( macroName, resultDisposition, matcher, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr, resultDisposition, #matcher ); \ if( __catchResult.allowThrows() ) \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ } \ catch( ... ) { \ __catchResult.captureExpectedException( matcher ); \ } \ else \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_THROWS_AS( macroName, exceptionType, resultDisposition, expr ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #expr ", " #exceptionType, resultDisposition ); \ if( __catchResult.allowThrows() ) \ try { \ static_cast(expr); \ __catchResult.captureResult( Catch::ResultWas::DidntThrowException ); \ } \ catch( const exceptionType& ) { \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ } \ catch( ... ) { \ __catchResult.useActiveException( resultDisposition ); \ } \ else \ __catchResult.captureResult( Catch::ResultWas::Ok ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) /////////////////////////////////////////////////////////////////////////////// #ifdef CATCH_CONFIG_VARIADIC_MACROS #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, ... 
) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ __catchResult << __VA_ARGS__ + ::Catch::StreamEndStop(); \ __catchResult.captureResult( messageType ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #else #define INTERNAL_CATCH_MSG( macroName, messageType, resultDisposition, log ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, "", resultDisposition ); \ __catchResult << log + ::Catch::StreamEndStop(); \ __catchResult.captureResult( messageType ); \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) #endif /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_INFO( macroName, log ) \ Catch::ScopedMessage INTERNAL_CATCH_UNIQUE_NAME( scopedMessage ) = Catch::MessageBuilder( macroName, CATCH_INTERNAL_LINEINFO, Catch::ResultWas::Info ) << log; /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CHECK_THAT( macroName, matcher, resultDisposition, arg ) \ do { \ Catch::ResultBuilder __catchResult( macroName, CATCH_INTERNAL_LINEINFO, #arg ", " #matcher, resultDisposition ); \ try { \ __catchResult.captureMatch( arg, matcher, #matcher ); \ } catch( ... 
) { \ __catchResult.useActiveException( resultDisposition | Catch::ResultDisposition::ContinueOnFailure ); \ } \ INTERNAL_CATCH_REACT( __catchResult ) \ } while( Catch::alwaysFalse() ) // #included from: internal/catch_section.h #define TWOBLUECUBES_CATCH_SECTION_H_INCLUDED // #included from: catch_section_info.h #define TWOBLUECUBES_CATCH_SECTION_INFO_H_INCLUDED // #included from: catch_totals.hpp #define TWOBLUECUBES_CATCH_TOTALS_HPP_INCLUDED #include namespace Catch { struct Counts { Counts() : passed( 0 ), failed( 0 ), failedButOk( 0 ) {} Counts operator - ( Counts const& other ) const { Counts diff; diff.passed = passed - other.passed; diff.failed = failed - other.failed; diff.failedButOk = failedButOk - other.failedButOk; return diff; } Counts& operator += ( Counts const& other ) { passed += other.passed; failed += other.failed; failedButOk += other.failedButOk; return *this; } std::size_t total() const { return passed + failed + failedButOk; } bool allPassed() const { return failed == 0 && failedButOk == 0; } bool allOk() const { return failed == 0; } std::size_t passed; std::size_t failed; std::size_t failedButOk; }; struct Totals { Totals operator - ( Totals const& other ) const { Totals diff; diff.assertions = assertions - other.assertions; diff.testCases = testCases - other.testCases; return diff; } Totals delta( Totals const& prevTotals ) const { Totals diff = *this - prevTotals; if( diff.assertions.failed > 0 ) ++diff.testCases.failed; else if( diff.assertions.failedButOk > 0 ) ++diff.testCases.failedButOk; else ++diff.testCases.passed; return diff; } Totals& operator += ( Totals const& other ) { assertions += other.assertions; testCases += other.testCases; return *this; } Counts assertions; Counts testCases; }; } #include namespace Catch { struct SectionInfo { SectionInfo ( SourceLineInfo const& _lineInfo, std::string const& _name, std::string const& _description = std::string() ); std::string name; std::string description; SourceLineInfo lineInfo; }; 
struct SectionEndInfo { SectionEndInfo( SectionInfo const& _sectionInfo, Counts const& _prevAssertions, double _durationInSeconds ) : sectionInfo( _sectionInfo ), prevAssertions( _prevAssertions ), durationInSeconds( _durationInSeconds ) {} SectionInfo sectionInfo; Counts prevAssertions; double durationInSeconds; }; } // end namespace Catch // #included from: catch_timer.h #define TWOBLUECUBES_CATCH_TIMER_H_INCLUDED #ifdef _MSC_VER namespace Catch { typedef unsigned long long UInt64; } #else #include namespace Catch { typedef uint64_t UInt64; } #endif namespace Catch { class Timer { public: Timer() : m_ticks( 0 ) {} void start(); unsigned int getElapsedMicroseconds() const; unsigned int getElapsedMilliseconds() const; double getElapsedSeconds() const; private: UInt64 m_ticks; }; } // namespace Catch #include namespace Catch { class Section : NonCopyable { public: Section( SectionInfo const& info ); ~Section(); // This indicates whether the section should be executed or not operator bool() const; private: SectionInfo m_info; std::string m_name; Counts m_assertions; bool m_sectionIncluded; Timer m_timer; }; } // end namespace Catch #ifdef CATCH_CONFIG_VARIADIC_MACROS #define INTERNAL_CATCH_SECTION( ... 
) \ if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, __VA_ARGS__ ) ) #else #define INTERNAL_CATCH_SECTION( name, desc ) \ if( Catch::Section const& INTERNAL_CATCH_UNIQUE_NAME( catch_internal_Section ) = Catch::SectionInfo( CATCH_INTERNAL_LINEINFO, name, desc ) ) #endif // #included from: internal/catch_generators.hpp #define TWOBLUECUBES_CATCH_GENERATORS_HPP_INCLUDED #include #include #include namespace Catch { template struct IGenerator { virtual ~IGenerator() {} virtual T getValue( std::size_t index ) const = 0; virtual std::size_t size () const = 0; }; template class BetweenGenerator : public IGenerator { public: BetweenGenerator( T from, T to ) : m_from( from ), m_to( to ){} virtual T getValue( std::size_t index ) const { return m_from+static_cast( index ); } virtual std::size_t size() const { return static_cast( 1+m_to-m_from ); } private: T m_from; T m_to; }; template class ValuesGenerator : public IGenerator { public: ValuesGenerator(){} void add( T value ) { m_values.push_back( value ); } virtual T getValue( std::size_t index ) const { return m_values[index]; } virtual std::size_t size() const { return m_values.size(); } private: std::vector m_values; }; template class CompositeGenerator { public: CompositeGenerator() : m_totalSize( 0 ) {} // *** Move semantics, similar to auto_ptr *** CompositeGenerator( CompositeGenerator& other ) : m_fileInfo( other.m_fileInfo ), m_totalSize( 0 ) { move( other ); } CompositeGenerator& setFileInfo( const char* fileInfo ) { m_fileInfo = fileInfo; return *this; } ~CompositeGenerator() { deleteAll( m_composed ); } operator T () const { size_t overallIndex = getCurrentContext().getGeneratorIndex( m_fileInfo, m_totalSize ); typename std::vector*>::const_iterator it = m_composed.begin(); typename std::vector*>::const_iterator itEnd = m_composed.end(); for( size_t index = 0; it != itEnd; ++it ) { const IGenerator* generator = *it; if( overallIndex >= 
index && overallIndex < index + generator->size() ) { return generator->getValue( overallIndex-index ); } index += generator->size(); } CATCH_INTERNAL_ERROR( "Indexed past end of generated range" ); return T(); // Suppress spurious "not all control paths return a value" warning in Visual Studio - if you know how to fix this please do so } void add( const IGenerator* generator ) { m_totalSize += generator->size(); m_composed.push_back( generator ); } CompositeGenerator& then( CompositeGenerator& other ) { move( other ); return *this; } CompositeGenerator& then( T value ) { ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( value ); add( valuesGen ); return *this; } private: void move( CompositeGenerator& other ) { m_composed.insert( m_composed.end(), other.m_composed.begin(), other.m_composed.end() ); m_totalSize += other.m_totalSize; other.m_composed.clear(); } std::vector*> m_composed; std::string m_fileInfo; size_t m_totalSize; }; namespace Generators { template CompositeGenerator between( T from, T to ) { CompositeGenerator generators; generators.add( new BetweenGenerator( from, to ) ); return generators; } template CompositeGenerator values( T val1, T val2 ) { CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); valuesGen->add( val2 ); generators.add( valuesGen ); return generators; } template CompositeGenerator values( T val1, T val2, T val3 ){ CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); valuesGen->add( val2 ); valuesGen->add( val3 ); generators.add( valuesGen ); return generators; } template CompositeGenerator values( T val1, T val2, T val3, T val4 ) { CompositeGenerator generators; ValuesGenerator* valuesGen = new ValuesGenerator(); valuesGen->add( val1 ); valuesGen->add( val2 ); valuesGen->add( val3 ); valuesGen->add( val4 ); generators.add( valuesGen ); return generators; } } // end namespace Generators using namespace 
Generators; } // end namespace Catch #define INTERNAL_CATCH_LINESTR2( line ) #line #define INTERNAL_CATCH_LINESTR( line ) INTERNAL_CATCH_LINESTR2( line ) #define INTERNAL_CATCH_GENERATE( expr ) expr.setFileInfo( __FILE__ "(" INTERNAL_CATCH_LINESTR( __LINE__ ) ")" ) // #included from: internal/catch_interfaces_exception.h #define TWOBLUECUBES_CATCH_INTERFACES_EXCEPTION_H_INCLUDED #include #include // #included from: catch_interfaces_registry_hub.h #define TWOBLUECUBES_CATCH_INTERFACES_REGISTRY_HUB_H_INCLUDED #include namespace Catch { class TestCase; struct ITestCaseRegistry; struct IExceptionTranslatorRegistry; struct IExceptionTranslator; struct IReporterRegistry; struct IReporterFactory; struct ITagAliasRegistry; struct IRegistryHub { virtual ~IRegistryHub(); virtual IReporterRegistry const& getReporterRegistry() const = 0; virtual ITestCaseRegistry const& getTestCaseRegistry() const = 0; virtual ITagAliasRegistry const& getTagAliasRegistry() const = 0; virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() = 0; }; struct IMutableRegistryHub { virtual ~IMutableRegistryHub(); virtual void registerReporter( std::string const& name, Ptr const& factory ) = 0; virtual void registerListener( Ptr const& factory ) = 0; virtual void registerTest( TestCase const& testInfo ) = 0; virtual void registerTranslator( const IExceptionTranslator* translator ) = 0; virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) = 0; }; IRegistryHub& getRegistryHub(); IMutableRegistryHub& getMutableRegistryHub(); void cleanUp(); std::string translateActiveException(); } namespace Catch { typedef std::string(*exceptionTranslateFunction)(); struct IExceptionTranslator; typedef std::vector ExceptionTranslators; struct IExceptionTranslator { virtual ~IExceptionTranslator(); virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const = 0; }; struct 
IExceptionTranslatorRegistry { virtual ~IExceptionTranslatorRegistry(); virtual std::string translateActiveException() const = 0; }; class ExceptionTranslatorRegistrar { template class ExceptionTranslator : public IExceptionTranslator { public: ExceptionTranslator( std::string(*translateFunction)( T& ) ) : m_translateFunction( translateFunction ) {} virtual std::string translate( ExceptionTranslators::const_iterator it, ExceptionTranslators::const_iterator itEnd ) const CATCH_OVERRIDE { try { if( it == itEnd ) throw; else return (*it)->translate( it+1, itEnd ); } catch( T& ex ) { return m_translateFunction( ex ); } } protected: std::string(*m_translateFunction)( T& ); }; public: template ExceptionTranslatorRegistrar( std::string(*translateFunction)( T& ) ) { getMutableRegistryHub().registerTranslator ( new ExceptionTranslator( translateFunction ) ); } }; } /////////////////////////////////////////////////////////////////////////////// #define INTERNAL_CATCH_TRANSLATE_EXCEPTION2( translatorName, signature ) \ static std::string translatorName( signature ); \ namespace{ Catch::ExceptionTranslatorRegistrar INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionRegistrar )( &translatorName ); }\ static std::string translatorName( signature ) #define INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION2( INTERNAL_CATCH_UNIQUE_NAME( catch_internal_ExceptionTranslator ), signature ) // #included from: internal/catch_approx.hpp #define TWOBLUECUBES_CATCH_APPROX_HPP_INCLUDED #include #include #if defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) #include #endif namespace Catch { namespace Detail { class Approx { public: explicit Approx ( double value ) : m_epsilon( std::numeric_limits::epsilon()*100 ), m_margin( 0.0 ), m_scale( 1.0 ), m_value( value ) {} Approx( Approx const& other ) : m_epsilon( other.m_epsilon ), m_margin( other.m_margin ), m_scale( other.m_scale ), m_value( other.m_value ) {} static Approx custom() { return Approx( 0 ); } #if 
defined(CATCH_CONFIG_CPP11_TYPE_TRAITS) template ::value>::type> Approx operator()( T value ) { Approx approx( static_cast(value) ); approx.epsilon( m_epsilon ); approx.margin( m_margin ); approx.scale( m_scale ); return approx; } template ::value>::type> explicit Approx( T value ): Approx(static_cast(value)) {} template ::value>::type> friend bool operator == ( const T& lhs, Approx const& rhs ) { // Thanks to Richard Harris for his help refining this formula auto lhs_v = double(lhs); bool relativeOK = std::fabs(lhs_v - rhs.m_value) < rhs.m_epsilon * (rhs.m_scale + (std::max)(std::fabs(lhs_v), std::fabs(rhs.m_value))); if (relativeOK) { return true; } return std::fabs(lhs_v - rhs.m_value) < rhs.m_margin; } template ::value>::type> friend bool operator == ( Approx const& lhs, const T& rhs ) { return operator==( rhs, lhs ); } template ::value>::type> friend bool operator != ( T lhs, Approx const& rhs ) { return !operator==( lhs, rhs ); } template ::value>::type> friend bool operator != ( Approx const& lhs, T rhs ) { return !operator==( rhs, lhs ); } template ::value>::type> friend bool operator <= ( T lhs, Approx const& rhs ) { return double(lhs) < rhs.m_value || lhs == rhs; } template ::value>::type> friend bool operator <= ( Approx const& lhs, T rhs ) { return lhs.m_value < double(rhs) || lhs == rhs; } template ::value>::type> friend bool operator >= ( T lhs, Approx const& rhs ) { return double(lhs) > rhs.m_value || lhs == rhs; } template ::value>::type> friend bool operator >= ( Approx const& lhs, T rhs ) { return lhs.m_value > double(rhs) || lhs == rhs; } template ::value>::type> Approx& epsilon( T newEpsilon ) { m_epsilon = double(newEpsilon); return *this; } template ::value>::type> Approx& margin( T newMargin ) { m_margin = double(newMargin); return *this; } template ::value>::type> Approx& scale( T newScale ) { m_scale = double(newScale); return *this; } #else Approx operator()( double value ) { Approx approx( value ); approx.epsilon( m_epsilon ); 
approx.margin( m_margin ); approx.scale( m_scale ); return approx; } friend bool operator == ( double lhs, Approx const& rhs ) { // Thanks to Richard Harris for his help refining this formula bool relativeOK = std::fabs( lhs - rhs.m_value ) < rhs.m_epsilon * (rhs.m_scale + (std::max)( std::fabs(lhs), std::fabs(rhs.m_value) ) ); if (relativeOK) { return true; } return std::fabs(lhs - rhs.m_value) < rhs.m_margin; } friend bool operator == ( Approx const& lhs, double rhs ) { return operator==( rhs, lhs ); } friend bool operator != ( double lhs, Approx const& rhs ) { return !operator==( lhs, rhs ); } friend bool operator != ( Approx const& lhs, double rhs ) { return !operator==( rhs, lhs ); } friend bool operator <= ( double lhs, Approx const& rhs ) { return lhs < rhs.m_value || lhs == rhs; } friend bool operator <= ( Approx const& lhs, double rhs ) { return lhs.m_value < rhs || lhs == rhs; } friend bool operator >= ( double lhs, Approx const& rhs ) { return lhs > rhs.m_value || lhs == rhs; } friend bool operator >= ( Approx const& lhs, double rhs ) { return lhs.m_value > rhs || lhs == rhs; } Approx& epsilon( double newEpsilon ) { m_epsilon = newEpsilon; return *this; } Approx& margin( double newMargin ) { m_margin = newMargin; return *this; } Approx& scale( double newScale ) { m_scale = newScale; return *this; } #endif std::string toString() const { std::ostringstream oss; oss << "Approx( " << Catch::toString( m_value ) << " )"; return oss.str(); } private: double m_epsilon; double m_margin; double m_scale; double m_value; }; } template<> inline std::string toString( Detail::Approx const& value ) { return value.toString(); } } // end namespace Catch // #included from: internal/catch_matchers_string.h #define TWOBLUECUBES_CATCH_MATCHERS_STRING_H_INCLUDED namespace Catch { namespace Matchers { namespace StdString { struct CasedString { CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity ); std::string adjustString( std::string const& str ) const; 
std::string caseSensitivitySuffix() const; CaseSensitive::Choice m_caseSensitivity; std::string m_str; }; struct StringMatcherBase : MatcherBase { StringMatcherBase( std::string const& operation, CasedString const& comparator ); virtual std::string describe() const CATCH_OVERRIDE; CasedString m_comparator; std::string m_operation; }; struct EqualsMatcher : StringMatcherBase { EqualsMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; struct ContainsMatcher : StringMatcherBase { ContainsMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; struct StartsWithMatcher : StringMatcherBase { StartsWithMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; struct EndsWithMatcher : StringMatcherBase { EndsWithMatcher( CasedString const& comparator ); virtual bool match( std::string const& source ) const CATCH_OVERRIDE; }; } // namespace StdString // The following functions create the actual matcher objects. 
// This allows the types to be inferred StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity = CaseSensitive::Yes ); } // namespace Matchers } // namespace Catch // #included from: internal/catch_matchers_vector.h #define TWOBLUECUBES_CATCH_MATCHERS_VECTOR_H_INCLUDED namespace Catch { namespace Matchers { namespace Vector { template struct ContainsElementMatcher : MatcherBase, T> { ContainsElementMatcher(T const &comparator) : m_comparator( comparator) {} bool match(std::vector const &v) const CATCH_OVERRIDE { return std::find(v.begin(), v.end(), m_comparator) != v.end(); } virtual std::string describe() const CATCH_OVERRIDE { return "Contains: " + Catch::toString( m_comparator ); } T const& m_comparator; }; template struct ContainsMatcher : MatcherBase, std::vector > { ContainsMatcher(std::vector const &comparator) : m_comparator( comparator ) {} bool match(std::vector const &v) const CATCH_OVERRIDE { // !TBD: see note in EqualsMatcher if (m_comparator.size() > v.size()) return false; for (size_t i = 0; i < m_comparator.size(); ++i) if (std::find(v.begin(), v.end(), m_comparator[i]) == v.end()) return false; return true; } virtual std::string describe() const CATCH_OVERRIDE { return "Contains: " + Catch::toString( m_comparator ); } std::vector const& m_comparator; }; template struct EqualsMatcher : MatcherBase, std::vector > { EqualsMatcher(std::vector const &comparator) : m_comparator( comparator ) {} bool match(std::vector const &v) const CATCH_OVERRIDE { // !TBD: This currently works if all elements can be compared using != // - a more general approach 
would be via a compare template that defaults // to using !=. but could be specialised for, e.g. std::vector etc // - then just call that directly if (m_comparator.size() != v.size()) return false; for (size_t i = 0; i < v.size(); ++i) if (m_comparator[i] != v[i]) return false; return true; } virtual std::string describe() const CATCH_OVERRIDE { return "Equals: " + Catch::toString( m_comparator ); } std::vector const& m_comparator; }; } // namespace Vector // The following functions create the actual matcher objects. // This allows the types to be inferred template Vector::ContainsMatcher Contains( std::vector const& comparator ) { return Vector::ContainsMatcher( comparator ); } template Vector::ContainsElementMatcher VectorContains( T const& comparator ) { return Vector::ContainsElementMatcher( comparator ); } template Vector::EqualsMatcher Equals( std::vector const& comparator ) { return Vector::EqualsMatcher( comparator ); } } // namespace Matchers } // namespace Catch // #included from: internal/catch_interfaces_tag_alias_registry.h #define TWOBLUECUBES_CATCH_INTERFACES_TAG_ALIAS_REGISTRY_H_INCLUDED // #included from: catch_tag_alias.h #define TWOBLUECUBES_CATCH_TAG_ALIAS_H_INCLUDED #include namespace Catch { struct TagAlias { TagAlias( std::string const& _tag, SourceLineInfo _lineInfo ) : tag( _tag ), lineInfo( _lineInfo ) {} std::string tag; SourceLineInfo lineInfo; }; struct RegistrarForTagAliases { RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ); }; } // end namespace Catch #define CATCH_REGISTER_TAG_ALIAS( alias, spec ) namespace{ Catch::RegistrarForTagAliases INTERNAL_CATCH_UNIQUE_NAME( AutoRegisterTagAlias )( alias, spec, CATCH_INTERNAL_LINEINFO ); } // #included from: catch_option.hpp #define TWOBLUECUBES_CATCH_OPTION_HPP_INCLUDED namespace Catch { // An optional type template class Option { public: Option() : nullableValue( CATCH_NULL ) {} Option( T const& _value ) : nullableValue( new( storage ) T( _value ) 
) {} Option( Option const& _other ) : nullableValue( _other ? new( storage ) T( *_other ) : CATCH_NULL ) {} ~Option() { reset(); } Option& operator= ( Option const& _other ) { if( &_other != this ) { reset(); if( _other ) nullableValue = new( storage ) T( *_other ); } return *this; } Option& operator = ( T const& _value ) { reset(); nullableValue = new( storage ) T( _value ); return *this; } void reset() { if( nullableValue ) nullableValue->~T(); nullableValue = CATCH_NULL; } T& operator*() { return *nullableValue; } T const& operator*() const { return *nullableValue; } T* operator->() { return nullableValue; } const T* operator->() const { return nullableValue; } T valueOr( T const& defaultValue ) const { return nullableValue ? *nullableValue : defaultValue; } bool some() const { return nullableValue != CATCH_NULL; } bool none() const { return nullableValue == CATCH_NULL; } bool operator !() const { return nullableValue == CATCH_NULL; } operator SafeBool::type() const { return SafeBool::makeSafe( some() ); } private: T *nullableValue; union { char storage[sizeof(T)]; // These are here to force alignment for the storage long double dummy1; void (*dummy2)(); long double dummy3; #ifdef CATCH_CONFIG_CPP11_LONG_LONG long long dummy4; #endif }; }; } // end namespace Catch namespace Catch { struct ITagAliasRegistry { virtual ~ITagAliasRegistry(); virtual Option find( std::string const& alias ) const = 0; virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const = 0; static ITagAliasRegistry const& get(); }; } // end namespace Catch // These files are included here so the single_include script doesn't put them // in the conditionally compiled sections // #included from: internal/catch_test_case_info.h #define TWOBLUECUBES_CATCH_TEST_CASE_INFO_H_INCLUDED #include #include #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" #endif namespace Catch { struct ITestCase; struct TestCaseInfo { enum SpecialProperties{ 
None = 0, IsHidden = 1 << 1, ShouldFail = 1 << 2, MayFail = 1 << 3, Throws = 1 << 4, NonPortable = 1 << 5 }; TestCaseInfo( std::string const& _name, std::string const& _className, std::string const& _description, std::set const& _tags, SourceLineInfo const& _lineInfo ); TestCaseInfo( TestCaseInfo const& other ); friend void setTags( TestCaseInfo& testCaseInfo, std::set const& tags ); bool isHidden() const; bool throws() const; bool okToFail() const; bool expectedToFail() const; std::string name; std::string className; std::string description; std::set tags; std::set lcaseTags; std::string tagsAsString; SourceLineInfo lineInfo; SpecialProperties properties; }; class TestCase : public TestCaseInfo { public: TestCase( ITestCase* testCase, TestCaseInfo const& info ); TestCase( TestCase const& other ); TestCase withName( std::string const& _newName ) const; void invoke() const; TestCaseInfo const& getTestCaseInfo() const; void swap( TestCase& other ); bool operator == ( TestCase const& other ) const; bool operator < ( TestCase const& other ) const; TestCase& operator = ( TestCase const& other ); private: Ptr test; }; TestCase makeTestCase( ITestCase* testCase, std::string const& className, std::string const& name, std::string const& description, SourceLineInfo const& lineInfo ); } #ifdef __clang__ # pragma clang diagnostic pop #endif #ifdef __OBJC__ // #included from: internal/catch_objc.hpp #define TWOBLUECUBES_CATCH_OBJC_HPP_INCLUDED #import #include // NB. Any general catch headers included here must be included // in catch.hpp first to make sure they are included by the single // header for non obj-usage /////////////////////////////////////////////////////////////////////////////// // This protocol is really only here for (self) documenting purposes, since // all its methods are optional. 
@protocol OcFixture @optional -(void) setUp; -(void) tearDown; @end namespace Catch { class OcMethod : public SharedImpl { public: OcMethod( Class cls, SEL sel ) : m_cls( cls ), m_sel( sel ) {} virtual void invoke() const { id obj = [[m_cls alloc] init]; performOptionalSelector( obj, @selector(setUp) ); performOptionalSelector( obj, m_sel ); performOptionalSelector( obj, @selector(tearDown) ); arcSafeRelease( obj ); } private: virtual ~OcMethod() {} Class m_cls; SEL m_sel; }; namespace Detail{ inline std::string getAnnotation( Class cls, std::string const& annotationName, std::string const& testCaseName ) { NSString* selStr = [[NSString alloc] initWithFormat:@"Catch_%s_%s", annotationName.c_str(), testCaseName.c_str()]; SEL sel = NSSelectorFromString( selStr ); arcSafeRelease( selStr ); id value = performOptionalSelector( cls, sel ); if( value ) return [(NSString*)value UTF8String]; return ""; } } inline size_t registerTestMethods() { size_t noTestMethods = 0; int noClasses = objc_getClassList( CATCH_NULL, 0 ); Class* classes = (CATCH_UNSAFE_UNRETAINED Class *)malloc( sizeof(Class) * noClasses); objc_getClassList( classes, noClasses ); for( int c = 0; c < noClasses; c++ ) { Class cls = classes[c]; { u_int count; Method* methods = class_copyMethodList( cls, &count ); for( u_int m = 0; m < count ; m++ ) { SEL selector = method_getName(methods[m]); std::string methodName = sel_getName(selector); if( startsWith( methodName, "Catch_TestCase_" ) ) { std::string testCaseName = methodName.substr( 15 ); std::string name = Detail::getAnnotation( cls, "Name", testCaseName ); std::string desc = Detail::getAnnotation( cls, "Description", testCaseName ); const char* className = class_getName( cls ); getMutableRegistryHub().registerTest( makeTestCase( new OcMethod( cls, selector ), className, name.c_str(), desc.c_str(), SourceLineInfo() ) ); noTestMethods++; } } free(methods); } } return noTestMethods; } namespace Matchers { namespace Impl { namespace NSStringMatchers { struct 
StringHolder : MatcherBase{ StringHolder( NSString* substr ) : m_substr( [substr copy] ){} StringHolder( StringHolder const& other ) : m_substr( [other.m_substr copy] ){} StringHolder() { arcSafeRelease( m_substr ); } virtual bool match( NSString* arg ) const CATCH_OVERRIDE { return false; } NSString* m_substr; }; struct Equals : StringHolder { Equals( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const CATCH_OVERRIDE { return (str != nil || m_substr == nil ) && [str isEqualToString:m_substr]; } virtual std::string describe() const CATCH_OVERRIDE { return "equals string: " + Catch::toString( m_substr ); } }; struct Contains : StringHolder { Contains( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const { return (str != nil || m_substr == nil ) && [str rangeOfString:m_substr].location != NSNotFound; } virtual std::string describe() const CATCH_OVERRIDE { return "contains string: " + Catch::toString( m_substr ); } }; struct StartsWith : StringHolder { StartsWith( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const { return (str != nil || m_substr == nil ) && [str rangeOfString:m_substr].location == 0; } virtual std::string describe() const CATCH_OVERRIDE { return "starts with: " + Catch::toString( m_substr ); } }; struct EndsWith : StringHolder { EndsWith( NSString* substr ) : StringHolder( substr ){} virtual bool match( NSString* str ) const { return (str != nil || m_substr == nil ) && [str rangeOfString:m_substr].location == [str length] - [m_substr length]; } virtual std::string describe() const CATCH_OVERRIDE { return "ends with: " + Catch::toString( m_substr ); } }; } // namespace NSStringMatchers } // namespace Impl inline Impl::NSStringMatchers::Equals Equals( NSString* substr ){ return Impl::NSStringMatchers::Equals( substr ); } inline Impl::NSStringMatchers::Contains Contains( NSString* substr ){ return Impl::NSStringMatchers::Contains( substr ); } 
inline Impl::NSStringMatchers::StartsWith StartsWith( NSString* substr ){ return Impl::NSStringMatchers::StartsWith( substr ); } inline Impl::NSStringMatchers::EndsWith EndsWith( NSString* substr ){ return Impl::NSStringMatchers::EndsWith( substr ); } } // namespace Matchers using namespace Matchers; } // namespace Catch /////////////////////////////////////////////////////////////////////////////// #define OC_TEST_CASE( name, desc )\ +(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Name_test ) \ {\ return @ name; \ }\ +(NSString*) INTERNAL_CATCH_UNIQUE_NAME( Catch_Description_test ) \ { \ return @ desc; \ } \ -(void) INTERNAL_CATCH_UNIQUE_NAME( Catch_TestCase_test ) #endif #ifdef CATCH_IMPL // !TBD: Move the leak detector code into a separate header #ifdef CATCH_CONFIG_WINDOWS_CRTDBG #include class LeakDetector { public: LeakDetector() { int flag = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG); flag |= _CRTDBG_LEAK_CHECK_DF; flag |= _CRTDBG_ALLOC_MEM_DF; _CrtSetDbgFlag(flag); _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE | _CRTDBG_MODE_DEBUG); _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); // Change this to leaking allocation's number to break there _CrtSetBreakAlloc(-1); } }; #else class LeakDetector {}; #endif LeakDetector leakDetector; // #included from: internal/catch_impl.hpp #define TWOBLUECUBES_CATCH_IMPL_HPP_INCLUDED // Collect all the implementation files together here // These are the equivalent of what would usually be cpp files #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wweak-vtables" #endif // #included from: ../catch_session.hpp #define TWOBLUECUBES_CATCH_RUNNER_HPP_INCLUDED // #included from: internal/catch_commandline.hpp #define TWOBLUECUBES_CATCH_COMMANDLINE_HPP_INCLUDED // #included from: catch_config.hpp #define TWOBLUECUBES_CATCH_CONFIG_HPP_INCLUDED // #included from: catch_test_spec_parser.hpp #define TWOBLUECUBES_CATCH_TEST_SPEC_PARSER_HPP_INCLUDED #ifdef __clang__ # pragma clang diagnostic push # pragma 
clang diagnostic ignored "-Wpadded" #endif // #included from: catch_test_spec.hpp #define TWOBLUECUBES_CATCH_TEST_SPEC_HPP_INCLUDED #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wpadded" #endif // #included from: catch_wildcard_pattern.hpp #define TWOBLUECUBES_CATCH_WILDCARD_PATTERN_HPP_INCLUDED #include namespace Catch { class WildcardPattern { enum WildcardPosition { NoWildcard = 0, WildcardAtStart = 1, WildcardAtEnd = 2, WildcardAtBothEnds = WildcardAtStart | WildcardAtEnd }; public: WildcardPattern( std::string const& pattern, CaseSensitive::Choice caseSensitivity ) : m_caseSensitivity( caseSensitivity ), m_wildcard( NoWildcard ), m_pattern( adjustCase( pattern ) ) { if( startsWith( m_pattern, '*' ) ) { m_pattern = m_pattern.substr( 1 ); m_wildcard = WildcardAtStart; } if( endsWith( m_pattern, '*' ) ) { m_pattern = m_pattern.substr( 0, m_pattern.size()-1 ); m_wildcard = static_cast( m_wildcard | WildcardAtEnd ); } } virtual ~WildcardPattern(); virtual bool matches( std::string const& str ) const { switch( m_wildcard ) { case NoWildcard: return m_pattern == adjustCase( str ); case WildcardAtStart: return endsWith( adjustCase( str ), m_pattern ); case WildcardAtEnd: return startsWith( adjustCase( str ), m_pattern ); case WildcardAtBothEnds: return contains( adjustCase( str ), m_pattern ); } #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wunreachable-code" #endif throw std::logic_error( "Unknown enum" ); #ifdef __clang__ # pragma clang diagnostic pop #endif } private: std::string adjustCase( std::string const& str ) const { return m_caseSensitivity == CaseSensitive::No ? 
toLower( str ) : str; } CaseSensitive::Choice m_caseSensitivity; WildcardPosition m_wildcard; std::string m_pattern; }; } #include #include namespace Catch { class TestSpec { struct Pattern : SharedImpl<> { virtual ~Pattern(); virtual bool matches( TestCaseInfo const& testCase ) const = 0; }; class NamePattern : public Pattern { public: NamePattern( std::string const& name ) : m_wildcardPattern( toLower( name ), CaseSensitive::No ) {} virtual ~NamePattern(); virtual bool matches( TestCaseInfo const& testCase ) const { return m_wildcardPattern.matches( toLower( testCase.name ) ); } private: WildcardPattern m_wildcardPattern; }; class TagPattern : public Pattern { public: TagPattern( std::string const& tag ) : m_tag( toLower( tag ) ) {} virtual ~TagPattern(); virtual bool matches( TestCaseInfo const& testCase ) const { return testCase.lcaseTags.find( m_tag ) != testCase.lcaseTags.end(); } private: std::string m_tag; }; class ExcludedPattern : public Pattern { public: ExcludedPattern( Ptr const& underlyingPattern ) : m_underlyingPattern( underlyingPattern ) {} virtual ~ExcludedPattern(); virtual bool matches( TestCaseInfo const& testCase ) const { return !m_underlyingPattern->matches( testCase ); } private: Ptr m_underlyingPattern; }; struct Filter { std::vector > m_patterns; bool matches( TestCaseInfo const& testCase ) const { // All patterns in a filter must match for the filter to be a match for( std::vector >::const_iterator it = m_patterns.begin(), itEnd = m_patterns.end(); it != itEnd; ++it ) { if( !(*it)->matches( testCase ) ) return false; } return true; } }; public: bool hasFilters() const { return !m_filters.empty(); } bool matches( TestCaseInfo const& testCase ) const { // A TestSpec matches if any filter matches for( std::vector::const_iterator it = m_filters.begin(), itEnd = m_filters.end(); it != itEnd; ++it ) if( it->matches( testCase ) ) return true; return false; } private: std::vector m_filters; friend class TestSpecParser; }; } #ifdef __clang__ # 
pragma clang diagnostic pop #endif namespace Catch { class TestSpecParser { enum Mode{ None, Name, QuotedName, Tag, EscapedName }; Mode m_mode; bool m_exclusion; std::size_t m_start, m_pos; std::string m_arg; std::vector m_escapeChars; TestSpec::Filter m_currentFilter; TestSpec m_testSpec; ITagAliasRegistry const* m_tagAliases; public: TestSpecParser( ITagAliasRegistry const& tagAliases ) : m_tagAliases( &tagAliases ) {} TestSpecParser& parse( std::string const& arg ) { m_mode = None; m_exclusion = false; m_start = std::string::npos; m_arg = m_tagAliases->expandAliases( arg ); m_escapeChars.clear(); for( m_pos = 0; m_pos < m_arg.size(); ++m_pos ) visitChar( m_arg[m_pos] ); if( m_mode == Name ) addPattern(); return *this; } TestSpec testSpec() { addFilter(); return m_testSpec; } private: void visitChar( char c ) { if( m_mode == None ) { switch( c ) { case ' ': return; case '~': m_exclusion = true; return; case '[': return startNewMode( Tag, ++m_pos ); case '"': return startNewMode( QuotedName, ++m_pos ); case '\\': return escape(); default: startNewMode( Name, m_pos ); break; } } if( m_mode == Name ) { if( c == ',' ) { addPattern(); addFilter(); } else if( c == '[' ) { if( subString() == "exclude:" ) m_exclusion = true; else addPattern(); startNewMode( Tag, ++m_pos ); } else if( c == '\\' ) escape(); } else if( m_mode == EscapedName ) m_mode = Name; else if( m_mode == QuotedName && c == '"' ) addPattern(); else if( m_mode == Tag && c == ']' ) addPattern(); } void startNewMode( Mode mode, std::size_t start ) { m_mode = mode; m_start = start; } void escape() { if( m_mode == None ) m_start = m_pos; m_mode = EscapedName; m_escapeChars.push_back( m_pos ); } std::string subString() const { return m_arg.substr( m_start, m_pos - m_start ); } template void addPattern() { std::string token = subString(); for( size_t i = 0; i < m_escapeChars.size(); ++i ) token = token.substr( 0, m_escapeChars[i]-m_start-i ) + token.substr( m_escapeChars[i]-m_start-i+1 ); 
m_escapeChars.clear(); if( startsWith( token, "exclude:" ) ) { m_exclusion = true; token = token.substr( 8 ); } if( !token.empty() ) { Ptr pattern = new T( token ); if( m_exclusion ) pattern = new TestSpec::ExcludedPattern( pattern ); m_currentFilter.m_patterns.push_back( pattern ); } m_exclusion = false; m_mode = None; } void addFilter() { if( !m_currentFilter.m_patterns.empty() ) { m_testSpec.m_filters.push_back( m_currentFilter ); m_currentFilter = TestSpec::Filter(); } } }; inline TestSpec parseTestSpec( std::string const& arg ) { return TestSpecParser( ITagAliasRegistry::get() ).parse( arg ).testSpec(); } } // namespace Catch #ifdef __clang__ # pragma clang diagnostic pop #endif // #included from: catch_interfaces_config.h #define TWOBLUECUBES_CATCH_INTERFACES_CONFIG_H_INCLUDED #include #include #include namespace Catch { struct Verbosity { enum Level { NoOutput = 0, Quiet, Normal }; }; struct WarnAbout { enum What { Nothing = 0x00, NoAssertions = 0x01 }; }; struct ShowDurations { enum OrNot { DefaultForReporter, Always, Never }; }; struct RunTests { enum InWhatOrder { InDeclarationOrder, InLexicographicalOrder, InRandomOrder }; }; struct UseColour { enum YesOrNo { Auto, Yes, No }; }; class TestSpec; struct IConfig : IShared { virtual ~IConfig(); virtual bool allowThrows() const = 0; virtual std::ostream& stream() const = 0; virtual std::string name() const = 0; virtual bool includeSuccessfulResults() const = 0; virtual bool shouldDebugBreak() const = 0; virtual bool warnAboutMissingAssertions() const = 0; virtual int abortAfter() const = 0; virtual bool showInvisibles() const = 0; virtual ShowDurations::OrNot showDurations() const = 0; virtual TestSpec const& testSpec() const = 0; virtual RunTests::InWhatOrder runOrder() const = 0; virtual unsigned int rngSeed() const = 0; virtual UseColour::YesOrNo useColour() const = 0; virtual std::vector const& getSectionsToRun() const = 0; }; } // #included from: catch_stream.h #define 
TWOBLUECUBES_CATCH_STREAM_H_INCLUDED // #included from: catch_streambuf.h #define TWOBLUECUBES_CATCH_STREAMBUF_H_INCLUDED #include namespace Catch { class StreamBufBase : public std::streambuf { public: virtual ~StreamBufBase() CATCH_NOEXCEPT; }; } #include #include #include #include namespace Catch { std::ostream& cout(); std::ostream& cerr(); struct IStream { virtual ~IStream() CATCH_NOEXCEPT; virtual std::ostream& stream() const = 0; }; class FileStream : public IStream { mutable std::ofstream m_ofs; public: FileStream( std::string const& filename ); virtual ~FileStream() CATCH_NOEXCEPT; public: // IStream virtual std::ostream& stream() const CATCH_OVERRIDE; }; class CoutStream : public IStream { mutable std::ostream m_os; public: CoutStream(); virtual ~CoutStream() CATCH_NOEXCEPT; public: // IStream virtual std::ostream& stream() const CATCH_OVERRIDE; }; class DebugOutStream : public IStream { CATCH_AUTO_PTR( StreamBufBase ) m_streamBuf; mutable std::ostream m_os; public: DebugOutStream(); virtual ~DebugOutStream() CATCH_NOEXCEPT; public: // IStream virtual std::ostream& stream() const CATCH_OVERRIDE; }; } #include #include #include #include #ifndef CATCH_CONFIG_CONSOLE_WIDTH #define CATCH_CONFIG_CONSOLE_WIDTH 80 #endif namespace Catch { struct ConfigData { ConfigData() : listTests( false ), listTags( false ), listReporters( false ), listTestNamesOnly( false ), listExtraInfo( false ), showSuccessfulTests( false ), shouldDebugBreak( false ), noThrow( false ), showHelp( false ), showInvisibles( false ), filenamesAsTags( false ), abortAfter( -1 ), rngSeed( 0 ), verbosity( Verbosity::Normal ), warnings( WarnAbout::Nothing ), showDurations( ShowDurations::DefaultForReporter ), runOrder( RunTests::InDeclarationOrder ), useColour( UseColour::Auto ) {} bool listTests; bool listTags; bool listReporters; bool listTestNamesOnly; bool listExtraInfo; bool showSuccessfulTests; bool shouldDebugBreak; bool noThrow; bool showHelp; bool showInvisibles; bool filenamesAsTags; int 
abortAfter; unsigned int rngSeed; Verbosity::Level verbosity; WarnAbout::What warnings; ShowDurations::OrNot showDurations; RunTests::InWhatOrder runOrder; UseColour::YesOrNo useColour; std::string outputFilename; std::string name; std::string processName; std::vector reporterNames; std::vector testsOrTags; std::vector sectionsToRun; }; class Config : public SharedImpl { private: Config( Config const& other ); Config& operator = ( Config const& other ); virtual void dummy(); public: Config() {} Config( ConfigData const& data ) : m_data( data ), m_stream( openStream() ) { if( !data.testsOrTags.empty() ) { TestSpecParser parser( ITagAliasRegistry::get() ); for( std::size_t i = 0; i < data.testsOrTags.size(); ++i ) parser.parse( data.testsOrTags[i] ); m_testSpec = parser.testSpec(); } } virtual ~Config() {} std::string const& getFilename() const { return m_data.outputFilename ; } bool listTests() const { return m_data.listTests; } bool listTestNamesOnly() const { return m_data.listTestNamesOnly; } bool listTags() const { return m_data.listTags; } bool listReporters() const { return m_data.listReporters; } bool listExtraInfo() const { return m_data.listExtraInfo; } std::string getProcessName() const { return m_data.processName; } std::vector const& getReporterNames() const { return m_data.reporterNames; } std::vector const& getSectionsToRun() const CATCH_OVERRIDE { return m_data.sectionsToRun; } virtual TestSpec const& testSpec() const CATCH_OVERRIDE { return m_testSpec; } bool showHelp() const { return m_data.showHelp; } // IConfig interface virtual bool allowThrows() const CATCH_OVERRIDE { return !m_data.noThrow; } virtual std::ostream& stream() const CATCH_OVERRIDE { return m_stream->stream(); } virtual std::string name() const CATCH_OVERRIDE { return m_data.name.empty() ? 
m_data.processName : m_data.name; } virtual bool includeSuccessfulResults() const CATCH_OVERRIDE { return m_data.showSuccessfulTests; } virtual bool warnAboutMissingAssertions() const CATCH_OVERRIDE { return m_data.warnings & WarnAbout::NoAssertions; } virtual ShowDurations::OrNot showDurations() const CATCH_OVERRIDE { return m_data.showDurations; } virtual RunTests::InWhatOrder runOrder() const CATCH_OVERRIDE { return m_data.runOrder; } virtual unsigned int rngSeed() const CATCH_OVERRIDE { return m_data.rngSeed; } virtual UseColour::YesOrNo useColour() const CATCH_OVERRIDE { return m_data.useColour; } virtual bool shouldDebugBreak() const CATCH_OVERRIDE { return m_data.shouldDebugBreak; } virtual int abortAfter() const CATCH_OVERRIDE { return m_data.abortAfter; } virtual bool showInvisibles() const CATCH_OVERRIDE { return m_data.showInvisibles; } private: IStream const* openStream() { if( m_data.outputFilename.empty() ) return new CoutStream(); else if( m_data.outputFilename[0] == '%' ) { if( m_data.outputFilename == "%debug" ) return new DebugOutStream(); else throw std::domain_error( "Unrecognised stream: " + m_data.outputFilename ); } else return new FileStream( m_data.outputFilename ); } ConfigData m_data; CATCH_AUTO_PTR( IStream const ) m_stream; TestSpec m_testSpec; }; } // end namespace Catch // #included from: catch_clara.h #define TWOBLUECUBES_CATCH_CLARA_H_INCLUDED // Use Catch's value for console width (store Clara's off to the side, if present) #ifdef CLARA_CONFIG_CONSOLE_WIDTH #define CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH CLARA_CONFIG_CONSOLE_WIDTH #undef CLARA_CONFIG_CONSOLE_WIDTH #endif #define CLARA_CONFIG_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH // Declare Clara inside the Catch namespace #define STITCH_CLARA_OPEN_NAMESPACE namespace Catch { // #included from: ../external/clara.h // Version 0.0.2.4 // Only use header guard if we are not using an outer namespace #if !defined(TWOBLUECUBES_CLARA_H_INCLUDED) || defined(STITCH_CLARA_OPEN_NAMESPACE) 
#ifndef STITCH_CLARA_OPEN_NAMESPACE #define TWOBLUECUBES_CLARA_H_INCLUDED #define STITCH_CLARA_OPEN_NAMESPACE #define STITCH_CLARA_CLOSE_NAMESPACE #else #define STITCH_CLARA_CLOSE_NAMESPACE } #endif #define STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE STITCH_CLARA_OPEN_NAMESPACE // ----------- #included from tbc_text_format.h ----------- // Only use header guard if we are not using an outer namespace #if !defined(TBC_TEXT_FORMAT_H_INCLUDED) || defined(STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE) #ifndef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE #define TBC_TEXT_FORMAT_H_INCLUDED #endif #include #include #include #include #include // Use optional outer namespace #ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE namespace STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE { #endif namespace Tbc { #ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH; #else const unsigned int consoleWidth = 80; #endif struct TextAttributes { TextAttributes() : initialIndent( std::string::npos ), indent( 0 ), width( consoleWidth-1 ), tabChar( '\t' ) {} TextAttributes& setInitialIndent( std::size_t _value ) { initialIndent = _value; return *this; } TextAttributes& setIndent( std::size_t _value ) { indent = _value; return *this; } TextAttributes& setWidth( std::size_t _value ) { width = _value; return *this; } TextAttributes& setTabChar( char _value ) { tabChar = _value; return *this; } std::size_t initialIndent; // indent of first line, or npos std::size_t indent; // indent of subsequent lines, or all if initialIndent is npos std::size_t width; // maximum width of text, including indent. Longer text will wrap char tabChar; // If this char is seen the indent is changed to current pos }; class Text { public: Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() ) : attr( _attr ) { std::string wrappableChars = " [({.,/|\\-"; std::size_t indent = _attr.initialIndent != std::string::npos ? 
_attr.initialIndent : _attr.indent; std::string remainder = _str; while( !remainder.empty() ) { if( lines.size() >= 1000 ) { lines.push_back( "... message truncated due to excessive size" ); return; } std::size_t tabPos = std::string::npos; std::size_t width = (std::min)( remainder.size(), _attr.width - indent ); std::size_t pos = remainder.find_first_of( '\n' ); if( pos <= width ) { width = pos; } pos = remainder.find_last_of( _attr.tabChar, width ); if( pos != std::string::npos ) { tabPos = pos; if( remainder[width] == '\n' ) width--; remainder = remainder.substr( 0, tabPos ) + remainder.substr( tabPos+1 ); } if( width == remainder.size() ) { spliceLine( indent, remainder, width ); } else if( remainder[width] == '\n' ) { spliceLine( indent, remainder, width ); if( width <= 1 || remainder.size() != 1 ) remainder = remainder.substr( 1 ); indent = _attr.indent; } else { pos = remainder.find_last_of( wrappableChars, width ); if( pos != std::string::npos && pos > 0 ) { spliceLine( indent, remainder, pos ); if( remainder[0] == ' ' ) remainder = remainder.substr( 1 ); } else { spliceLine( indent, remainder, width-1 ); lines.back() += "-"; } if( lines.size() == 1 ) indent = _attr.indent; if( tabPos != std::string::npos ) indent += tabPos; } } } void spliceLine( std::size_t _indent, std::string& _remainder, std::size_t _pos ) { lines.push_back( std::string( _indent, ' ' ) + _remainder.substr( 0, _pos ) ); _remainder = _remainder.substr( _pos ); } typedef std::vector::const_iterator const_iterator; const_iterator begin() const { return lines.begin(); } const_iterator end() const { return lines.end(); } std::string const& last() const { return lines.back(); } std::size_t size() const { return lines.size(); } std::string const& operator[]( std::size_t _index ) const { return lines[_index]; } std::string toString() const { std::ostringstream oss; oss << *this; return oss.str(); } inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) { for( 
Text::const_iterator it = _text.begin(), itEnd = _text.end(); it != itEnd; ++it ) { if( it != _text.begin() ) _stream << "\n"; _stream << *it; } return _stream; } private: std::string str; TextAttributes attr; std::vector lines; }; } // end namespace Tbc #ifdef STITCH_TBC_TEXT_FORMAT_OUTER_NAMESPACE } // end outer namespace #endif #endif // TBC_TEXT_FORMAT_H_INCLUDED // ----------- end of #include from tbc_text_format.h ----------- // ........... back in clara.h #undef STITCH_TBC_TEXT_FORMAT_OPEN_NAMESPACE // ----------- #included from clara_compilers.h ----------- #ifndef TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED #define TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED // Detect a number of compiler features - mostly C++11/14 conformance - by compiler // The following features are defined: // // CLARA_CONFIG_CPP11_NULLPTR : is nullptr supported? // CLARA_CONFIG_CPP11_NOEXCEPT : is noexcept supported? // CLARA_CONFIG_CPP11_GENERATED_METHODS : The delete and default keywords for compiler generated methods // CLARA_CONFIG_CPP11_OVERRIDE : is override supported? // CLARA_CONFIG_CPP11_UNIQUE_PTR : is unique_ptr supported (otherwise use auto_ptr) // CLARA_CONFIG_CPP11_OR_GREATER : Is C++11 supported? // CLARA_CONFIG_VARIADIC_MACROS : are variadic macros supported? // In general each macro has a _NO_ form // (e.g. CLARA_CONFIG_CPP11_NO_NULLPTR) which disables the feature. // Many features, at point of detection, define an _INTERNAL_ macro, so they // can be combined, en-mass, with the _NO_ forms later. 
// All the C++11 features can be disabled with CLARA_CONFIG_NO_CPP11 #ifdef __clang__ #if __has_feature(cxx_nullptr) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #endif #if __has_feature(cxx_noexcept) #define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #endif #endif // __clang__ //////////////////////////////////////////////////////////////////////////////// // GCC #ifdef __GNUC__ #if __GNUC__ == 4 && __GNUC_MINOR__ >= 6 && defined(__GXX_EXPERIMENTAL_CXX0X__) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #endif // - otherwise more recent versions define __cplusplus >= 201103L // and will get picked up below #endif // __GNUC__ //////////////////////////////////////////////////////////////////////////////// // Visual C++ #ifdef _MSC_VER #if (_MSC_VER >= 1600) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR #endif #if (_MSC_VER >= 1900 ) // (VC++ 13 (VS2015)) #define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #endif #endif // _MSC_VER //////////////////////////////////////////////////////////////////////////////// // C++ language feature support // catch all support for C++11 #if defined(__cplusplus) && __cplusplus >= 201103L #define CLARA_CPP11_OR_GREATER #if !defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) #define CLARA_INTERNAL_CONFIG_CPP11_NULLPTR #endif #ifndef CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #define CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT #endif #ifndef CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #define CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS #endif #if !defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) #define CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE #endif #if !defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) #define CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR #endif #endif // __cplusplus >= 201103L // Now set the actual defines based on the above + anything the user has configured #if defined(CLARA_INTERNAL_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_CPP11_NO_NULLPTR) && 
!defined(CLARA_CONFIG_CPP11_NULLPTR) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_NULLPTR #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NO_NOEXCEPT) && !defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_NOEXCEPT #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_NO_GENERATED_METHODS) && !defined(CLARA_CONFIG_CPP11_GENERATED_METHODS) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_GENERATED_METHODS #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_OVERRIDE) && !defined(CLARA_CONFIG_CPP11_OVERRIDE) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_OVERRIDE #endif #if defined(CLARA_INTERNAL_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_UNIQUE_PTR) && !defined(CLARA_CONFIG_CPP11_UNIQUE_PTR) && !defined(CLARA_CONFIG_NO_CPP11) #define CLARA_CONFIG_CPP11_UNIQUE_PTR #endif // noexcept support: #if defined(CLARA_CONFIG_CPP11_NOEXCEPT) && !defined(CLARA_NOEXCEPT) #define CLARA_NOEXCEPT noexcept # define CLARA_NOEXCEPT_IS(x) noexcept(x) #else #define CLARA_NOEXCEPT throw() # define CLARA_NOEXCEPT_IS(x) #endif // nullptr support #ifdef CLARA_CONFIG_CPP11_NULLPTR #define CLARA_NULL nullptr #else #define CLARA_NULL NULL #endif // override support #ifdef CLARA_CONFIG_CPP11_OVERRIDE #define CLARA_OVERRIDE override #else #define CLARA_OVERRIDE #endif // unique_ptr support #ifdef CLARA_CONFIG_CPP11_UNIQUE_PTR # define CLARA_AUTO_PTR( T ) std::unique_ptr #else # define CLARA_AUTO_PTR( T ) std::auto_ptr #endif #endif // TWOBLUECUBES_CLARA_COMPILERS_H_INCLUDED // ----------- end of #include from clara_compilers.h ----------- // ........... 
// NOTE(review): this region is the embedded Clara command-line parser: the Detail
// helper namespace (string utilities, convertInto conversions, the IArgFunction
// binder hierarchy), argsToVector, the token Parser, and the start of the
// CommandLine builder. Text extraction has stripped every '<...>' sequence --
// template parameter lists, template argument lists, and '#include <...>' header
// names -- so e.g. "std::vector args" was originally "std::vector<std::string> args"
// and "template struct IArgFunction {" was "template<typename ConfigT> struct
// IArgFunction {". Restore from the upstream Catch v1.x single header before
// compiling; the code below is otherwise preserved byte-for-byte as found.
back in clara.h #include #include #include #if defined(WIN32) || defined(__WIN32__) || defined(_WIN32) || defined(_MSC_VER) #define CLARA_PLATFORM_WINDOWS #endif // Use optional outer namespace #ifdef STITCH_CLARA_OPEN_NAMESPACE STITCH_CLARA_OPEN_NAMESPACE #endif namespace Clara { struct UnpositionalTag {}; extern UnpositionalTag _; #ifdef CLARA_CONFIG_MAIN UnpositionalTag _; #endif namespace Detail { #ifdef CLARA_CONSOLE_WIDTH const unsigned int consoleWidth = CLARA_CONFIG_CONSOLE_WIDTH; #else const unsigned int consoleWidth = 80; #endif using namespace Tbc; inline bool startsWith( std::string const& str, std::string const& prefix ) { return str.size() >= prefix.size() && str.substr( 0, prefix.size() ) == prefix; } template struct RemoveConstRef{ typedef T type; }; template struct RemoveConstRef{ typedef T type; }; template struct RemoveConstRef{ typedef T type; }; template struct RemoveConstRef{ typedef T type; }; template struct IsBool { static const bool value = false; }; template<> struct IsBool { static const bool value = true; }; template void convertInto( std::string const& _source, T& _dest ) { std::stringstream ss; ss << _source; ss >> _dest; if( ss.fail() ) throw std::runtime_error( "Unable to convert " + _source + " to destination type" ); } inline void convertInto( std::string const& _source, std::string& _dest ) { _dest = _source; } char toLowerCh(char c) { return static_cast( std::tolower( c ) ); } inline void convertInto( std::string const& _source, bool& _dest ) { std::string sourceLC = _source; std::transform( sourceLC.begin(), sourceLC.end(), sourceLC.begin(), toLowerCh ); if( sourceLC == "y" || sourceLC == "1" || sourceLC == "true" || sourceLC == "yes" || sourceLC == "on" ) _dest = true; else if( sourceLC == "n" || sourceLC == "0" || sourceLC == "false" || sourceLC == "no" || sourceLC == "off" ) _dest = false; else throw std::runtime_error( "Expected a boolean value but did not recognise:\n '" + _source + "'" ); } template struct IArgFunction { 
// IArgFunction: abstract binder interface (set/takesArg/clone); BoundArgFunction wraps a
// heap-allocated IArgFunction with value semantics, cloning on copy and deleting on
// destruction. The Bound* structs below adapt data members, methods, and free functions.
virtual ~IArgFunction() {} #ifdef CLARA_CONFIG_CPP11_GENERATED_METHODS IArgFunction() = default; IArgFunction( IArgFunction const& ) = default; #endif virtual void set( ConfigT& config, std::string const& value ) const = 0; virtual bool takesArg() const = 0; virtual IArgFunction* clone() const = 0; }; template class BoundArgFunction { public: BoundArgFunction() : functionObj( CLARA_NULL ) {} BoundArgFunction( IArgFunction* _functionObj ) : functionObj( _functionObj ) {} BoundArgFunction( BoundArgFunction const& other ) : functionObj( other.functionObj ? other.functionObj->clone() : CLARA_NULL ) {} BoundArgFunction& operator = ( BoundArgFunction const& other ) { IArgFunction* newFunctionObj = other.functionObj ? other.functionObj->clone() : CLARA_NULL; delete functionObj; functionObj = newFunctionObj; return *this; } ~BoundArgFunction() { delete functionObj; } void set( ConfigT& config, std::string const& value ) const { functionObj->set( config, value ); } bool takesArg() const { return functionObj->takesArg(); } bool isSet() const { return functionObj != CLARA_NULL; } private: IArgFunction* functionObj; }; template struct NullBinder : IArgFunction{ virtual void set( C&, std::string const& ) const {} virtual bool takesArg() const { return true; } virtual IArgFunction* clone() const { return new NullBinder( *this ); } }; template struct BoundDataMember : IArgFunction{ BoundDataMember( M C::* _member ) : member( _member ) {} virtual void set( C& p, std::string const& stringValue ) const { convertInto( stringValue, p.*member ); } virtual bool takesArg() const { return !IsBool::value; } virtual IArgFunction* clone() const { return new BoundDataMember( *this ); } M C::* member; }; template struct BoundUnaryMethod : IArgFunction{ BoundUnaryMethod( void (C::*_member)( M ) ) : member( _member ) {} virtual void set( C& p, std::string const& stringValue ) const { typename RemoveConstRef::type value; convertInto( stringValue, value ); (p.*member)( value ); } virtual bool 
takesArg() const { return !IsBool::value; } virtual IArgFunction* clone() const { return new BoundUnaryMethod( *this ); } void (C::*member)( M ); }; template struct BoundNullaryMethod : IArgFunction{ BoundNullaryMethod( void (C::*_member)() ) : member( _member ) {} virtual void set( C& p, std::string const& stringValue ) const { bool value; convertInto( stringValue, value ); if( value ) (p.*member)(); } virtual bool takesArg() const { return false; } virtual IArgFunction* clone() const { return new BoundNullaryMethod( *this ); } void (C::*member)(); }; template struct BoundUnaryFunction : IArgFunction{ BoundUnaryFunction( void (*_function)( C& ) ) : function( _function ) {} virtual void set( C& obj, std::string const& stringValue ) const { bool value; convertInto( stringValue, value ); if( value ) function( obj ); } virtual bool takesArg() const { return false; } virtual IArgFunction* clone() const { return new BoundUnaryFunction( *this ); } void (*function)( C& ); }; template struct BoundBinaryFunction : IArgFunction{ BoundBinaryFunction( void (*_function)( C&, T ) ) : function( _function ) {} virtual void set( C& obj, std::string const& stringValue ) const { typename RemoveConstRef::type value; convertInto( stringValue, value ); function( obj, value ); } virtual bool takesArg() const { return !IsBool::value; } virtual IArgFunction* clone() const { return new BoundBinaryFunction( *this ); } void (*function)( C&, T ); }; } // namespace Detail inline std::vector argsToVector( int argc, char const* const* const argv ) { std::vector args( static_cast( argc ) ); for( std::size_t i = 0; i < static_cast( argc ); ++i ) args[i] = argv[i]; return args; } class Parser { enum Mode { None, MaybeShortOpt, SlashOpt, ShortOpt, LongOpt, Positional }; Mode mode; std::size_t from; bool inQuotes; public: struct Token { enum Type { Positional, ShortOpt, LongOpt }; Token( Type _type, std::string const& _data ) : type( _type ), data( _data ) {} Type type; std::string data; }; Parser() : 
// Parser: a small state machine that tokenises argv entries into Positional /
// ShortOpt / LongOpt tokens, honouring quoting, "--" end-of-options, and (on
// Windows) "/x" style switches.
mode( None ), from( 0 ), inQuotes( false ){} void parseIntoTokens( std::vector const& args, std::vector& tokens ) { const std::string doubleDash = "--"; for( std::size_t i = 1; i < args.size() && args[i] != doubleDash; ++i ) parseIntoTokens( args[i], tokens); } void parseIntoTokens( std::string const& arg, std::vector& tokens ) { for( std::size_t i = 0; i < arg.size(); ++i ) { char c = arg[i]; if( c == '"' ) inQuotes = !inQuotes; mode = handleMode( i, c, arg, tokens ); } mode = handleMode( arg.size(), '\0', arg, tokens ); } Mode handleMode( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { switch( mode ) { case None: return handleNone( i, c ); case MaybeShortOpt: return handleMaybeShortOpt( i, c ); case ShortOpt: case LongOpt: case SlashOpt: return handleOpt( i, c, arg, tokens ); case Positional: return handlePositional( i, c, arg, tokens ); default: throw std::logic_error( "Unknown mode" ); } } Mode handleNone( std::size_t i, char c ) { if( inQuotes ) { from = i; return Positional; } switch( c ) { case '-': return MaybeShortOpt; #ifdef CLARA_PLATFORM_WINDOWS case '/': from = i+1; return SlashOpt; #endif default: from = i; return Positional; } } Mode handleMaybeShortOpt( std::size_t i, char c ) { switch( c ) { case '-': from = i+1; return LongOpt; default: from = i; return ShortOpt; } } Mode handleOpt( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { if( std::string( ":=\0", 3 ).find( c ) == std::string::npos ) return mode; std::string optName = arg.substr( from, i-from ); if( mode == ShortOpt ) for( std::size_t j = 0; j < optName.size(); ++j ) tokens.push_back( Token( Token::ShortOpt, optName.substr( j, 1 ) ) ); else if( mode == SlashOpt && optName.size() == 1 ) tokens.push_back( Token( Token::ShortOpt, optName ) ); else tokens.push_back( Token( Token::LongOpt, optName ) ); return None; } Mode handlePositional( std::size_t i, char c, std::string const& arg, std::vector& tokens ) { if( inQuotes || std::string( "\0", 1 
).find( c ) == std::string::npos ) return mode; std::string data = arg.substr( from, i-from ); tokens.push_back( Token( Token::Positional, data ) ); return None; } }; template struct CommonArgProperties { CommonArgProperties() {} CommonArgProperties( Detail::BoundArgFunction const& _boundField ) : boundField( _boundField ) {} Detail::BoundArgFunction boundField; std::string description; std::string detail; std::string placeholder; // Only value if boundField takes an arg bool takesArg() const { return !placeholder.empty(); } void validate() const { if( !boundField.isSet() ) throw std::logic_error( "option not bound" ); } }; struct OptionArgProperties { std::vector shortNames; std::string longName; bool hasShortName( std::string const& shortName ) const { return std::find( shortNames.begin(), shortNames.end(), shortName ) != shortNames.end(); } bool hasLongName( std::string const& _longName ) const { return _longName == longName; } }; struct PositionalArgProperties { PositionalArgProperties() : position( -1 ) {} int position; // -1 means non-positional (floating) bool isFixedPositional() const { return position != -1; } }; template class CommandLine { struct Arg : CommonArgProperties, OptionArgProperties, PositionalArgProperties { Arg() {} Arg( Detail::BoundArgFunction const& _boundField ) : CommonArgProperties( _boundField ) {} using CommonArgProperties::placeholder; // !TBD std::string dbgName() const { if( !longName.empty() ) return "--" + longName; if( !shortNames.empty() ) return "-" + shortNames[0]; return "positional args"; } std::string commands() const { std::ostringstream oss; bool first = true; std::vector::const_iterator it = shortNames.begin(), itEnd = shortNames.end(); for(; it != itEnd; ++it ) { if( first ) first = false; else oss << ", "; oss << "-" << *it; } if( !longName.empty() ) { if( !first ) oss << ", "; oss << "--" << longName; } if( !placeholder.empty() ) oss << " <" << placeholder << ">"; return oss.str(); } }; typedef CLARA_AUTO_PTR( Arg ) 
// NOTE(review): continuation of the embedded Clara parser: the CommandLine<ConfigT>
// builder (ArgBuilder/OptBuilder bind overloads, operator[] registration, usage
// rendering, and the parse/populate pipeline), the close of namespace Clara, and --
// at the very end of this span -- the opening of namespace Catch with
// abortAfterFirst. As elsewhere in this region, extraction stripped all '<...>'
// text (template arguments such as "std::vector<Parser::Token>" and
// "std::map<int, Arg>"); restore from the upstream Catch v1.x single header before
// compiling. Code is otherwise preserved byte-for-byte as found.
ArgAutoPtr; friend void addOptName( Arg& arg, std::string const& optName ) { if( optName.empty() ) return; if( Detail::startsWith( optName, "--" ) ) { if( !arg.longName.empty() ) throw std::logic_error( "Only one long opt may be specified. '" + arg.longName + "' already specified, now attempting to add '" + optName + "'" ); arg.longName = optName.substr( 2 ); } else if( Detail::startsWith( optName, "-" ) ) arg.shortNames.push_back( optName.substr( 1 ) ); else throw std::logic_error( "option must begin with - or --. Option was: '" + optName + "'" ); } friend void setPositionalArg( Arg& arg, int position ) { arg.position = position; } class ArgBuilder { public: ArgBuilder( Arg* arg ) : m_arg( arg ) {} // Bind a non-boolean data member (requires placeholder string) template void bind( M C::* field, std::string const& placeholder ) { m_arg->boundField = new Detail::BoundDataMember( field ); m_arg->placeholder = placeholder; } // Bind a boolean data member (no placeholder required) template void bind( bool C::* field ) { m_arg->boundField = new Detail::BoundDataMember( field ); } // Bind a method taking a single, non-boolean argument (requires a placeholder string) template void bind( void (C::* unaryMethod)( M ), std::string const& placeholder ) { m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); m_arg->placeholder = placeholder; } // Bind a method taking a single, boolean argument (no placeholder string required) template void bind( void (C::* unaryMethod)( bool ) ) { m_arg->boundField = new Detail::BoundUnaryMethod( unaryMethod ); } // Bind a method that takes no arguments (will be called if opt is present) template void bind( void (C::* nullaryMethod)() ) { m_arg->boundField = new Detail::BoundNullaryMethod( nullaryMethod ); } // Bind a free function taking a single argument - the object to operate on (no placeholder string required) template void bind( void (* unaryFunction)( C& ) ) { m_arg->boundField = new Detail::BoundUnaryFunction( unaryFunction 
); } // Bind a free function taking a single argument - the object to operate on (requires a placeholder string) template void bind( void (* binaryFunction)( C&, T ), std::string const& placeholder ) { m_arg->boundField = new Detail::BoundBinaryFunction( binaryFunction ); m_arg->placeholder = placeholder; } ArgBuilder& describe( std::string const& description ) { m_arg->description = description; return *this; } ArgBuilder& detail( std::string const& detail ) { m_arg->detail = detail; return *this; } protected: Arg* m_arg; }; class OptBuilder : public ArgBuilder { public: OptBuilder( Arg* arg ) : ArgBuilder( arg ) {} OptBuilder( OptBuilder& other ) : ArgBuilder( other ) {} OptBuilder& operator[]( std::string const& optName ) { addOptName( *ArgBuilder::m_arg, optName ); return *this; } }; public: CommandLine() : m_boundProcessName( new Detail::NullBinder() ), m_highestSpecifiedArgPosition( 0 ), m_throwOnUnrecognisedTokens( false ) {} CommandLine( CommandLine const& other ) : m_boundProcessName( other.m_boundProcessName ), m_options ( other.m_options ), m_positionalArgs( other.m_positionalArgs ), m_highestSpecifiedArgPosition( other.m_highestSpecifiedArgPosition ), m_throwOnUnrecognisedTokens( other.m_throwOnUnrecognisedTokens ) { if( other.m_floatingArg.get() ) m_floatingArg.reset( new Arg( *other.m_floatingArg ) ); } CommandLine& setThrowOnUnrecognisedTokens( bool shouldThrow = true ) { m_throwOnUnrecognisedTokens = shouldThrow; return *this; } OptBuilder operator[]( std::string const& optName ) { m_options.push_back( Arg() ); addOptName( m_options.back(), optName ); OptBuilder builder( &m_options.back() ); return builder; } ArgBuilder operator[]( int position ) { m_positionalArgs.insert( std::make_pair( position, Arg() ) ); if( position > m_highestSpecifiedArgPosition ) m_highestSpecifiedArgPosition = position; setPositionalArg( m_positionalArgs[position], position ); ArgBuilder builder( &m_positionalArgs[position] ); return builder; } // Invoke this with the _ 
instance ArgBuilder operator[]( UnpositionalTag ) { if( m_floatingArg.get() ) throw std::logic_error( "Only one unpositional argument can be added" ); m_floatingArg.reset( new Arg() ); ArgBuilder builder( m_floatingArg.get() ); return builder; } template void bindProcessName( M C::* field ) { m_boundProcessName = new Detail::BoundDataMember( field ); } template void bindProcessName( void (C::*_unaryMethod)( M ) ) { m_boundProcessName = new Detail::BoundUnaryMethod( _unaryMethod ); } void optUsage( std::ostream& os, std::size_t indent = 0, std::size_t width = Detail::consoleWidth ) const { typename std::vector::const_iterator itBegin = m_options.begin(), itEnd = m_options.end(), it; std::size_t maxWidth = 0; for( it = itBegin; it != itEnd; ++it ) maxWidth = (std::max)( maxWidth, it->commands().size() ); for( it = itBegin; it != itEnd; ++it ) { Detail::Text usage( it->commands(), Detail::TextAttributes() .setWidth( maxWidth+indent ) .setIndent( indent ) ); Detail::Text desc( it->description, Detail::TextAttributes() .setWidth( width - maxWidth - 3 ) ); for( std::size_t i = 0; i < (std::max)( usage.size(), desc.size() ); ++i ) { std::string usageCol = i < usage.size() ? 
usage[i] : ""; os << usageCol; if( i < desc.size() && !desc[i].empty() ) os << std::string( indent + 2 + maxWidth - usageCol.size(), ' ' ) << desc[i]; os << "\n"; } } } std::string optUsage() const { std::ostringstream oss; optUsage( oss ); return oss.str(); } void argSynopsis( std::ostream& os ) const { for( int i = 1; i <= m_highestSpecifiedArgPosition; ++i ) { if( i > 1 ) os << " "; typename std::map::const_iterator it = m_positionalArgs.find( i ); if( it != m_positionalArgs.end() ) os << "<" << it->second.placeholder << ">"; else if( m_floatingArg.get() ) os << "<" << m_floatingArg->placeholder << ">"; else throw std::logic_error( "non consecutive positional arguments with no floating args" ); } // !TBD No indication of mandatory args if( m_floatingArg.get() ) { if( m_highestSpecifiedArgPosition > 1 ) os << " "; os << "[<" << m_floatingArg->placeholder << "> ...]"; } } std::string argSynopsis() const { std::ostringstream oss; argSynopsis( oss ); return oss.str(); } void usage( std::ostream& os, std::string const& procName ) const { validate(); os << "usage:\n  " << procName << " "; argSynopsis( os ); if( !m_options.empty() ) { os << " [options]\n\nwhere options are: \n"; optUsage( os, 2 ); } os << "\n"; } std::string usage( std::string const& procName ) const { std::ostringstream oss; usage( oss, procName ); return oss.str(); } ConfigT parse( std::vector const& args ) const { ConfigT config; parseInto( args, config ); return config; } std::vector parseInto( std::vector const& args, ConfigT& config ) const { std::string processName = args.empty() ? 
std::string() : args[0]; std::size_t lastSlash = processName.find_last_of( "/\\" ); if( lastSlash != std::string::npos ) processName = processName.substr( lastSlash+1 ); m_boundProcessName.set( config, processName ); std::vector tokens; Parser parser; parser.parseIntoTokens( args, tokens ); return populate( tokens, config ); } std::vector populate( std::vector const& tokens, ConfigT& config ) const { validate(); std::vector unusedTokens = populateOptions( tokens, config ); unusedTokens = populateFixedArgs( unusedTokens, config ); unusedTokens = populateFloatingArgs( unusedTokens, config ); return unusedTokens; } std::vector populateOptions( std::vector const& tokens, ConfigT& config ) const { std::vector unusedTokens; std::vector errors; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; typename std::vector::const_iterator it = m_options.begin(), itEnd = m_options.end(); for(; it != itEnd; ++it ) { Arg const& arg = *it; try { if( ( token.type == Parser::Token::ShortOpt && arg.hasShortName( token.data ) ) || ( token.type == Parser::Token::LongOpt && arg.hasLongName( token.data ) ) ) { if( arg.takesArg() ) { if( i == tokens.size()-1 || tokens[i+1].type != Parser::Token::Positional ) errors.push_back( "Expected argument to option: " + token.data ); else arg.boundField.set( config, tokens[++i].data ); } else { arg.boundField.set( config, "true" ); } break; } } catch( std::exception& ex ) { errors.push_back( std::string( ex.what() ) + "\n- while parsing: (" + arg.commands() + ")" ); } } if( it == itEnd ) { if( token.type == Parser::Token::Positional || !m_throwOnUnrecognisedTokens ) unusedTokens.push_back( token ); else if( errors.empty() && m_throwOnUnrecognisedTokens ) errors.push_back( "unrecognised option: " + token.data ); } } if( !errors.empty() ) { std::ostringstream oss; for( std::vector::const_iterator it = errors.begin(), itEnd = errors.end(); it != itEnd; ++it ) { if( it != errors.begin() ) oss << "\n"; oss << *it; } 
throw std::runtime_error( oss.str() ); } return unusedTokens; } std::vector populateFixedArgs( std::vector const& tokens, ConfigT& config ) const { std::vector unusedTokens; int position = 1; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; typename std::map::const_iterator it = m_positionalArgs.find( position ); if( it != m_positionalArgs.end() ) it->second.boundField.set( config, token.data ); else unusedTokens.push_back( token ); if( token.type == Parser::Token::Positional ) position++; } return unusedTokens; } std::vector populateFloatingArgs( std::vector const& tokens, ConfigT& config ) const { if( !m_floatingArg.get() ) return tokens; std::vector unusedTokens; for( std::size_t i = 0; i < tokens.size(); ++i ) { Parser::Token const& token = tokens[i]; if( token.type == Parser::Token::Positional ) m_floatingArg->boundField.set( config, token.data ); else unusedTokens.push_back( token ); } return unusedTokens; } void validate() const { if( m_options.empty() && m_positionalArgs.empty() && !m_floatingArg.get() ) throw std::logic_error( "No options or arguments specified" ); for( typename std::vector::const_iterator it = m_options.begin(), itEnd = m_options.end(); it != itEnd; ++it ) it->validate(); } private: Detail::BoundArgFunction m_boundProcessName; std::vector m_options; std::map m_positionalArgs; ArgAutoPtr m_floatingArg; int m_highestSpecifiedArgPosition; bool m_throwOnUnrecognisedTokens; }; } // end namespace Clara STITCH_CLARA_CLOSE_NAMESPACE #undef STITCH_CLARA_OPEN_NAMESPACE #undef STITCH_CLARA_CLOSE_NAMESPACE #endif // TWOBLUECUBES_CLARA_H_INCLUDED #undef STITCH_CLARA_OPEN_NAMESPACE // Restore Clara's value for console width, if present #ifdef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH #define CLARA_CONFIG_CONSOLE_WIDTH CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH #undef CATCH_TEMP_CLARA_CONFIG_CONSOLE_WIDTH #endif #include #include namespace Catch { inline void abortAfterFirst( ConfigData& config ) { config.abortAfter = 1; 
} inline void abortAfterX( ConfigData& config, int x ) { if( x < 1 ) throw std::runtime_error( "Value after -x or --abortAfter must be greater than zero" ); config.abortAfter = x; } inline void addTestOrTags( ConfigData& config, std::string const& _testSpec ) { config.testsOrTags.push_back( _testSpec ); } inline void addSectionToRun( ConfigData& config, std::string const& sectionName ) { config.sectionsToRun.push_back( sectionName ); } inline void addReporterName( ConfigData& config, std::string const& _reporterName ) { config.reporterNames.push_back( _reporterName ); } inline void addWarning( ConfigData& config, std::string const& _warning ) { if( _warning == "NoAssertions" ) config.warnings = static_cast( config.warnings | WarnAbout::NoAssertions ); else throw std::runtime_error( "Unrecognised warning: '" + _warning + '\'' ); } inline void setOrder( ConfigData& config, std::string const& order ) { if( startsWith( "declared", order ) ) config.runOrder = RunTests::InDeclarationOrder; else if( startsWith( "lexical", order ) ) config.runOrder = RunTests::InLexicographicalOrder; else if( startsWith( "random", order ) ) config.runOrder = RunTests::InRandomOrder; else throw std::runtime_error( "Unrecognised ordering: '" + order + '\'' ); } inline void setRngSeed( ConfigData& config, std::string const& seed ) { if( seed == "time" ) { config.rngSeed = static_cast( std::time(0) ); } else { std::stringstream ss; ss << seed; ss >> config.rngSeed; if( ss.fail() ) throw std::runtime_error( "Argument to --rng-seed should be the word 'time' or a number" ); } } inline void setVerbosity( ConfigData& config, int level ) { // !TBD: accept strings? config.verbosity = static_cast( level ); } inline void setShowDurations( ConfigData& config, bool _showDurations ) { config.showDurations = _showDurations ? 
ShowDurations::Always : ShowDurations::Never; } inline void setUseColour( ConfigData& config, std::string const& value ) { std::string mode = toLower( value ); if( mode == "yes" ) config.useColour = UseColour::Yes; else if( mode == "no" ) config.useColour = UseColour::No; else if( mode == "auto" ) config.useColour = UseColour::Auto; else throw std::runtime_error( "colour mode must be one of: auto, yes or no" ); } inline void forceColour( ConfigData& config ) { config.useColour = UseColour::Yes; } inline void loadTestNamesFromFile( ConfigData& config, std::string const& _filename ) { std::ifstream f( _filename.c_str() ); if( !f.is_open() ) throw std::domain_error( "Unable to load input file: " + _filename ); std::string line; while( std::getline( f, line ) ) { line = trim(line); if( !line.empty() && !startsWith( line, '#' ) ) { if( !startsWith( line, '"' ) ) line = '"' + line + '"'; addTestOrTags( config, line + ',' ); } } } inline Clara::CommandLine makeCommandLineParser() { using namespace Clara; CommandLine cli; cli.bindProcessName( &ConfigData::processName ); cli["-?"]["-h"]["--help"] .describe( "display usage information" ) .bind( &ConfigData::showHelp ); cli["-l"]["--list-tests"] .describe( "list all/matching test cases" ) .bind( &ConfigData::listTests ); cli["-t"]["--list-tags"] .describe( "list all/matching tags" ) .bind( &ConfigData::listTags ); cli["-s"]["--success"] .describe( "include successful tests in output" ) .bind( &ConfigData::showSuccessfulTests ); cli["-b"]["--break"] .describe( "break into debugger on failure" ) .bind( &ConfigData::shouldDebugBreak ); cli["-e"]["--nothrow"] .describe( "skip exception tests" ) .bind( &ConfigData::noThrow ); cli["-i"]["--invisibles"] .describe( "show invisibles (tabs, newlines)" ) .bind( &ConfigData::showInvisibles ); cli["-o"]["--out"] .describe( "output filename" ) .bind( &ConfigData::outputFilename, "filename" ); cli["-r"]["--reporter"] // .placeholder( "name[:filename]" ) .describe( "reporter to use (defaults 
to console)" ) .bind( &addReporterName, "name" ); cli["-n"]["--name"] .describe( "suite name" ) .bind( &ConfigData::name, "name" ); cli["-a"]["--abort"] .describe( "abort at first failure" ) .bind( &abortAfterFirst ); cli["-x"]["--abortx"] .describe( "abort after x failures" ) .bind( &abortAfterX, "no. failures" ); cli["-w"]["--warn"] .describe( "enable warnings" ) .bind( &addWarning, "warning name" ); // - needs updating if reinstated // cli.into( &setVerbosity ) // .describe( "level of verbosity (0=no output)" ) // .shortOpt( "v") // .longOpt( "verbosity" ) // .placeholder( "level" ); cli[_] .describe( "which test or tests to use" ) .bind( &addTestOrTags, "test name, pattern or tags" ); cli["-d"]["--durations"] .describe( "show test durations" ) .bind( &setShowDurations, "yes|no" ); cli["-f"]["--input-file"] .describe( "load test names to run from a file" ) .bind( &loadTestNamesFromFile, "filename" ); cli["-#"]["--filenames-as-tags"] .describe( "adds a tag for the filename" ) .bind( &ConfigData::filenamesAsTags ); cli["-c"]["--section"] .describe( "specify section to run" ) .bind( &addSectionToRun, "section name" ); // Less common commands which don't have a short form cli["--list-test-names-only"] .describe( "list all/matching test cases names only" ) .bind( &ConfigData::listTestNamesOnly ); cli["--list-extra-info"] .describe( "list all/matching test cases with more info" ) .bind( &ConfigData::listExtraInfo ); cli["--list-reporters"] .describe( "list all reporters" ) .bind( &ConfigData::listReporters ); cli["--order"] .describe( "test case order (defaults to decl)" ) .bind( &setOrder, "decl|lex|rand" ); cli["--rng-seed"] .describe( "set a specific seed for random numbers" ) .bind( &setRngSeed, "'time'|number" ); cli["--force-colour"] .describe( "force colourised output (deprecated)" ) .bind( &forceColour ); cli["--use-colour"] .describe( "should output be colourised" ) .bind( &setUseColour, "yes|no" ); return cli; } } // end namespace Catch // #included from: 
internal/catch_list.hpp #define TWOBLUECUBES_CATCH_LIST_HPP_INCLUDED // #included from: catch_text.h #define TWOBLUECUBES_CATCH_TEXT_H_INCLUDED #define TBC_TEXT_FORMAT_CONSOLE_WIDTH CATCH_CONFIG_CONSOLE_WIDTH #define CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE Catch // #included from: ../external/tbc_text_format.h // Only use header guard if we are not using an outer namespace #ifndef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE # ifdef TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED # ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED # define TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED # endif # else # define TWOBLUECUBES_TEXT_FORMAT_H_INCLUDED # endif #endif #ifndef TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED #include #include #include // Use optional outer namespace #ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE namespace CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE { #endif namespace Tbc { #ifdef TBC_TEXT_FORMAT_CONSOLE_WIDTH const unsigned int consoleWidth = TBC_TEXT_FORMAT_CONSOLE_WIDTH; #else const unsigned int consoleWidth = 80; #endif struct TextAttributes { TextAttributes() : initialIndent( std::string::npos ), indent( 0 ), width( consoleWidth-1 ) {} TextAttributes& setInitialIndent( std::size_t _value ) { initialIndent = _value; return *this; } TextAttributes& setIndent( std::size_t _value ) { indent = _value; return *this; } TextAttributes& setWidth( std::size_t _value ) { width = _value; return *this; } std::size_t initialIndent; // indent of first line, or npos std::size_t indent; // indent of subsequent lines, or all if initialIndent is npos std::size_t width; // maximum width of text, including indent. Longer text will wrap }; class Text { public: Text( std::string const& _str, TextAttributes const& _attr = TextAttributes() ) : attr( _attr ) { const std::string wrappableBeforeChars = "[({<\t"; const std::string wrappableAfterChars = "])}>-,./|\\"; const std::string wrappableInsteadOfChars = " \n\r"; std::string indent = _attr.initialIndent != std::string::npos ? 
std::string( _attr.initialIndent, ' ' ) : std::string( _attr.indent, ' ' ); typedef std::string::const_iterator iterator; iterator it = _str.begin(); const iterator strEnd = _str.end(); while( it != strEnd ) { if( lines.size() >= 1000 ) { lines.push_back( "... message truncated due to excessive size" ); return; } std::string suffix; std::size_t width = (std::min)( static_cast( strEnd-it ), _attr.width-static_cast( indent.size() ) ); iterator itEnd = it+width; iterator itNext = _str.end(); iterator itNewLine = std::find( it, itEnd, '\n' ); if( itNewLine != itEnd ) itEnd = itNewLine; if( itEnd != strEnd ) { bool foundWrapPoint = false; iterator findIt = itEnd; do { if( wrappableAfterChars.find( *findIt ) != std::string::npos && findIt != itEnd ) { itEnd = findIt+1; itNext = findIt+1; foundWrapPoint = true; } else if( findIt > it && wrappableBeforeChars.find( *findIt ) != std::string::npos ) { itEnd = findIt; itNext = findIt; foundWrapPoint = true; } else if( wrappableInsteadOfChars.find( *findIt ) != std::string::npos ) { itNext = findIt+1; itEnd = findIt; foundWrapPoint = true; } if( findIt == it ) break; else --findIt; } while( !foundWrapPoint ); if( !foundWrapPoint ) { // No good wrap char, so we'll break mid word and add a hyphen --itEnd; itNext = itEnd; suffix = "-"; } else { while( itEnd > it && wrappableInsteadOfChars.find( *(itEnd-1) ) != std::string::npos ) --itEnd; } } lines.push_back( indent + std::string( it, itEnd ) + suffix ); if( indent.size() != _attr.indent ) indent = std::string( _attr.indent, ' ' ); it = itNext; } } typedef std::vector::const_iterator const_iterator; const_iterator begin() const { return lines.begin(); } const_iterator end() const { return lines.end(); } std::string const& last() const { return lines.back(); } std::size_t size() const { return lines.size(); } std::string const& operator[]( std::size_t _index ) const { return lines[_index]; } std::string toString() const { std::ostringstream oss; oss << *this; return oss.str(); } 
inline friend std::ostream& operator << ( std::ostream& _stream, Text const& _text ) { for( Text::const_iterator it = _text.begin(), itEnd = _text.end(); it != itEnd; ++it ) { if( it != _text.begin() ) _stream << "\n"; _stream << *it; } return _stream; } private: std::string str; TextAttributes attr; std::vector lines; }; } // end namespace Tbc #ifdef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE } // end outer namespace #endif #endif // TWOBLUECUBES_TEXT_FORMAT_H_ALREADY_INCLUDED #undef CLICHE_TBC_TEXT_FORMAT_OUTER_NAMESPACE namespace Catch { using Tbc::Text; using Tbc::TextAttributes; } // #included from: catch_console_colour.hpp #define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_HPP_INCLUDED namespace Catch { struct Colour { enum Code { None = 0, White, Red, Green, Blue, Cyan, Yellow, Grey, Bright = 0x10, BrightRed = Bright | Red, BrightGreen = Bright | Green, LightGrey = Bright | Grey, BrightWhite = Bright | White, // By intention FileName = LightGrey, Warning = Yellow, ResultError = BrightRed, ResultSuccess = BrightGreen, ResultExpectedFailure = Warning, Error = BrightRed, Success = Green, OriginalExpression = Cyan, ReconstructedExpression = Yellow, SecondaryText = LightGrey, Headers = White }; // Use constructed object for RAII guard Colour( Code _colourCode ); Colour( Colour const& other ); ~Colour(); // Use static method for one-shot changes static void use( Code _colourCode ); private: bool m_moved; }; inline std::ostream& operator << ( std::ostream& os, Colour const& ) { return os; } } // end namespace Catch // #included from: catch_interfaces_reporter.h #define TWOBLUECUBES_CATCH_INTERFACES_REPORTER_H_INCLUDED #include #include #include namespace Catch { struct ReporterConfig { explicit ReporterConfig( Ptr const& _fullConfig ) : m_stream( &_fullConfig->stream() ), m_fullConfig( _fullConfig ) {} ReporterConfig( Ptr const& _fullConfig, std::ostream& _stream ) : m_stream( &_stream ), m_fullConfig( _fullConfig ) {} std::ostream& stream() const { return *m_stream; } Ptr 
fullConfig() const { return m_fullConfig; } private: std::ostream* m_stream; Ptr m_fullConfig; }; struct ReporterPreferences { ReporterPreferences() : shouldRedirectStdOut( false ) {} bool shouldRedirectStdOut; }; template struct LazyStat : Option { LazyStat() : used( false ) {} LazyStat& operator=( T const& _value ) { Option::operator=( _value ); used = false; return *this; } void reset() { Option::reset(); used = false; } bool used; }; struct TestRunInfo { TestRunInfo( std::string const& _name ) : name( _name ) {} std::string name; }; struct GroupInfo { GroupInfo( std::string const& _name, std::size_t _groupIndex, std::size_t _groupsCount ) : name( _name ), groupIndex( _groupIndex ), groupsCounts( _groupsCount ) {} std::string name; std::size_t groupIndex; std::size_t groupsCounts; }; struct AssertionStats { AssertionStats( AssertionResult const& _assertionResult, std::vector const& _infoMessages, Totals const& _totals ) : assertionResult( _assertionResult ), infoMessages( _infoMessages ), totals( _totals ) { if( assertionResult.hasMessage() ) { // Copy message into messages list. 
// !TBD This should have been done earlier, somewhere MessageBuilder builder( assertionResult.getTestMacroName(), assertionResult.getSourceInfo(), assertionResult.getResultType() ); builder << assertionResult.getMessage(); builder.m_info.message = builder.m_stream.str(); infoMessages.push_back( builder.m_info ); } } virtual ~AssertionStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS AssertionStats( AssertionStats const& ) = default; AssertionStats( AssertionStats && ) = default; AssertionStats& operator = ( AssertionStats const& ) = default; AssertionStats& operator = ( AssertionStats && ) = default; # endif AssertionResult assertionResult; std::vector infoMessages; Totals totals; }; struct SectionStats { SectionStats( SectionInfo const& _sectionInfo, Counts const& _assertions, double _durationInSeconds, bool _missingAssertions ) : sectionInfo( _sectionInfo ), assertions( _assertions ), durationInSeconds( _durationInSeconds ), missingAssertions( _missingAssertions ) {} virtual ~SectionStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS SectionStats( SectionStats const& ) = default; SectionStats( SectionStats && ) = default; SectionStats& operator = ( SectionStats const& ) = default; SectionStats& operator = ( SectionStats && ) = default; # endif SectionInfo sectionInfo; Counts assertions; double durationInSeconds; bool missingAssertions; }; struct TestCaseStats { TestCaseStats( TestCaseInfo const& _testInfo, Totals const& _totals, std::string const& _stdOut, std::string const& _stdErr, bool _aborting ) : testInfo( _testInfo ), totals( _totals ), stdOut( _stdOut ), stdErr( _stdErr ), aborting( _aborting ) {} virtual ~TestCaseStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS TestCaseStats( TestCaseStats const& ) = default; TestCaseStats( TestCaseStats && ) = default; TestCaseStats& operator = ( TestCaseStats const& ) = default; TestCaseStats& operator = ( TestCaseStats && ) = default; # endif TestCaseInfo testInfo; Totals totals; std::string stdOut; 
std::string stdErr; bool aborting; }; struct TestGroupStats { TestGroupStats( GroupInfo const& _groupInfo, Totals const& _totals, bool _aborting ) : groupInfo( _groupInfo ), totals( _totals ), aborting( _aborting ) {} TestGroupStats( GroupInfo const& _groupInfo ) : groupInfo( _groupInfo ), aborting( false ) {} virtual ~TestGroupStats(); # ifdef CATCH_CONFIG_CPP11_GENERATED_METHODS TestGroupStats( TestGroupStats const& ) = default; TestGroupStats( TestGroupStats && ) = default; TestGroupStats& operator = ( TestGroupStats const& ) = default; TestGroupStats& operator = ( TestGroupStats && ) = default; # endif GroupInfo groupInfo; Totals totals; bool aborting; }; struct TestRunStats { TestRunStats( TestRunInfo const& _runInfo, Totals const& _totals, bool _aborting ) : runInfo( _runInfo ), totals( _totals ), aborting( _aborting ) {} virtual ~TestRunStats(); # ifndef CATCH_CONFIG_CPP11_GENERATED_METHODS TestRunStats( TestRunStats const& _other ) : runInfo( _other.runInfo ), totals( _other.totals ), aborting( _other.aborting ) {} # else TestRunStats( TestRunStats const& ) = default; TestRunStats( TestRunStats && ) = default; TestRunStats& operator = ( TestRunStats const& ) = default; TestRunStats& operator = ( TestRunStats && ) = default; # endif TestRunInfo runInfo; Totals totals; bool aborting; }; class MultipleReporters; struct IStreamingReporter : IShared { virtual ~IStreamingReporter(); // Implementing class must also provide the following static method: // static std::string getDescription(); virtual ReporterPreferences getPreferences() const = 0; virtual void noMatchingTestCases( std::string const& spec ) = 0; virtual void testRunStarting( TestRunInfo const& testRunInfo ) = 0; virtual void testGroupStarting( GroupInfo const& groupInfo ) = 0; virtual void testCaseStarting( TestCaseInfo const& testInfo ) = 0; virtual void sectionStarting( SectionInfo const& sectionInfo ) = 0; virtual void assertionStarting( AssertionInfo const& assertionInfo ) = 0; // The return 
value indicates if the messages buffer should be cleared: virtual bool assertionEnded( AssertionStats const& assertionStats ) = 0; virtual void sectionEnded( SectionStats const& sectionStats ) = 0; virtual void testCaseEnded( TestCaseStats const& testCaseStats ) = 0; virtual void testGroupEnded( TestGroupStats const& testGroupStats ) = 0; virtual void testRunEnded( TestRunStats const& testRunStats ) = 0; virtual void skipTest( TestCaseInfo const& testInfo ) = 0; virtual MultipleReporters* tryAsMulti() { return CATCH_NULL; } }; struct IReporterFactory : IShared { virtual ~IReporterFactory(); virtual IStreamingReporter* create( ReporterConfig const& config ) const = 0; virtual std::string getDescription() const = 0; }; struct IReporterRegistry { typedef std::map > FactoryMap; typedef std::vector > Listeners; virtual ~IReporterRegistry(); virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const = 0; virtual FactoryMap const& getFactories() const = 0; virtual Listeners const& getListeners() const = 0; }; Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ); } #include #include namespace Catch { inline std::size_t listTests( Config const& config ) { TestSpec testSpec = config.testSpec(); if( config.testSpec().hasFilters() ) Catch::cout() << "Matching test cases:\n"; else { Catch::cout() << "All available test cases:\n"; testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); } std::size_t matchedTests = 0; TextAttributes nameAttr, descAttr, tagsAttr; nameAttr.setInitialIndent( 2 ).setIndent( 4 ); descAttr.setIndent( 4 ); tagsAttr.setIndent( 6 ); std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { matchedTests++; TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); Colour::Code colour = testCaseInfo.isHidden() ? 
Colour::SecondaryText : Colour::None; Colour colourGuard( colour ); Catch::cout() << Text( testCaseInfo.name, nameAttr ) << std::endl; if( config.listExtraInfo() ) { Catch::cout() << " " << testCaseInfo.lineInfo << std::endl; std::string description = testCaseInfo.description; if( description.empty() ) description = "(NO DESCRIPTION)"; Catch::cout() << Text( description, descAttr ) << std::endl; } if( !testCaseInfo.tags.empty() ) Catch::cout() << Text( testCaseInfo.tagsAsString, tagsAttr ) << std::endl; } if( !config.testSpec().hasFilters() ) Catch::cout() << pluralise( matchedTests, "test case" ) << '\n' << std::endl; else Catch::cout() << pluralise( matchedTests, "matching test case" ) << '\n' << std::endl; return matchedTests; } inline std::size_t listTestsNamesOnly( Config const& config ) { TestSpec testSpec = config.testSpec(); if( !config.testSpec().hasFilters() ) testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); std::size_t matchedTests = 0; std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { matchedTests++; TestCaseInfo const& testCaseInfo = it->getTestCaseInfo(); if( startsWith( testCaseInfo.name, '#' ) ) Catch::cout() << '"' << testCaseInfo.name << '"'; else Catch::cout() << testCaseInfo.name; if ( config.listExtraInfo() ) Catch::cout() << "\t@" << testCaseInfo.lineInfo; Catch::cout() << std::endl; } return matchedTests; } struct TagInfo { TagInfo() : count ( 0 ) {} void add( std::string const& spelling ) { ++count; spellings.insert( spelling ); } std::string all() const { std::string out; for( std::set::const_iterator it = spellings.begin(), itEnd = spellings.end(); it != itEnd; ++it ) out += "[" + *it + "]"; return out; } std::set spellings; std::size_t count; }; inline std::size_t listTags( Config const& config ) { TestSpec testSpec = config.testSpec(); if( 
config.testSpec().hasFilters() ) Catch::cout() << "Tags for matching test cases:\n"; else { Catch::cout() << "All available tags:\n"; testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "*" ).testSpec(); } std::map tagCounts; std::vector matchedTestCases = filterTests( getAllTestCasesSorted( config ), testSpec, config ); for( std::vector::const_iterator it = matchedTestCases.begin(), itEnd = matchedTestCases.end(); it != itEnd; ++it ) { for( std::set::const_iterator tagIt = it->getTestCaseInfo().tags.begin(), tagItEnd = it->getTestCaseInfo().tags.end(); tagIt != tagItEnd; ++tagIt ) { std::string tagName = *tagIt; std::string lcaseTagName = toLower( tagName ); std::map::iterator countIt = tagCounts.find( lcaseTagName ); if( countIt == tagCounts.end() ) countIt = tagCounts.insert( std::make_pair( lcaseTagName, TagInfo() ) ).first; countIt->second.add( tagName ); } } for( std::map::const_iterator countIt = tagCounts.begin(), countItEnd = tagCounts.end(); countIt != countItEnd; ++countIt ) { std::ostringstream oss; oss << " " << std::setw(2) << countIt->second.count << " "; Text wrapper( countIt->second.all(), TextAttributes() .setInitialIndent( 0 ) .setIndent( oss.str().size() ) .setWidth( CATCH_CONFIG_CONSOLE_WIDTH-10 ) ); Catch::cout() << oss.str() << wrapper << '\n'; } Catch::cout() << pluralise( tagCounts.size(), "tag" ) << '\n' << std::endl; return tagCounts.size(); } inline std::size_t listReporters( Config const& /*config*/ ) { Catch::cout() << "Available reporters:\n"; IReporterRegistry::FactoryMap const& factories = getRegistryHub().getReporterRegistry().getFactories(); IReporterRegistry::FactoryMap::const_iterator itBegin = factories.begin(), itEnd = factories.end(), it; std::size_t maxNameLen = 0; for(it = itBegin; it != itEnd; ++it ) maxNameLen = (std::max)( maxNameLen, it->first.size() ); for(it = itBegin; it != itEnd; ++it ) { Text wrapper( it->second->getDescription(), TextAttributes() .setInitialIndent( 0 ) .setIndent( 7+maxNameLen ) 
.setWidth( CATCH_CONFIG_CONSOLE_WIDTH - maxNameLen-8 ) ); Catch::cout() << " " << it->first << ':' << std::string( maxNameLen - it->first.size() + 2, ' ' ) << wrapper << '\n'; } Catch::cout() << std::endl; return factories.size(); } inline Option list( Config const& config ) { Option listedCount; if( config.listTests() || ( config.listExtraInfo() && !config.listTestNamesOnly() ) ) listedCount = listedCount.valueOr(0) + listTests( config ); if( config.listTestNamesOnly() ) listedCount = listedCount.valueOr(0) + listTestsNamesOnly( config ); if( config.listTags() ) listedCount = listedCount.valueOr(0) + listTags( config ); if( config.listReporters() ) listedCount = listedCount.valueOr(0) + listReporters( config ); return listedCount; } } // end namespace Catch // #included from: internal/catch_run_context.hpp #define TWOBLUECUBES_CATCH_RUNNER_IMPL_HPP_INCLUDED // #included from: catch_test_case_tracker.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_TRACKER_HPP_INCLUDED #include #include #include #include #include CATCH_INTERNAL_SUPPRESS_ETD_WARNINGS namespace Catch { namespace TestCaseTracking { struct NameAndLocation { std::string name; SourceLineInfo location; NameAndLocation( std::string const& _name, SourceLineInfo const& _location ) : name( _name ), location( _location ) {} }; struct ITracker : SharedImpl<> { virtual ~ITracker(); // static queries virtual NameAndLocation const& nameAndLocation() const = 0; // dynamic queries virtual bool isComplete() const = 0; // Successfully completed or failed virtual bool isSuccessfullyCompleted() const = 0; virtual bool isOpen() const = 0; // Started but not complete virtual bool hasChildren() const = 0; virtual ITracker& parent() = 0; // actions virtual void close() = 0; // Successfully complete virtual void fail() = 0; virtual void markAsNeedingAnotherRun() = 0; virtual void addChild( Ptr const& child ) = 0; virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) = 0; virtual void openChild() = 0; // Debug/ 
checking virtual bool isSectionTracker() const = 0; virtual bool isIndexTracker() const = 0; }; class TrackerContext { enum RunState { NotStarted, Executing, CompletedCycle }; Ptr m_rootTracker; ITracker* m_currentTracker; RunState m_runState; public: static TrackerContext& instance() { static TrackerContext s_instance; return s_instance; } TrackerContext() : m_currentTracker( CATCH_NULL ), m_runState( NotStarted ) {} ITracker& startRun(); void endRun() { m_rootTracker.reset(); m_currentTracker = CATCH_NULL; m_runState = NotStarted; } void startCycle() { m_currentTracker = m_rootTracker.get(); m_runState = Executing; } void completeCycle() { m_runState = CompletedCycle; } bool completedCycle() const { return m_runState == CompletedCycle; } ITracker& currentTracker() { return *m_currentTracker; } void setCurrentTracker( ITracker* tracker ) { m_currentTracker = tracker; } }; class TrackerBase : public ITracker { protected: enum CycleState { NotStarted, Executing, ExecutingChildren, NeedsAnotherRun, CompletedSuccessfully, Failed }; class TrackerHasName { NameAndLocation m_nameAndLocation; public: TrackerHasName( NameAndLocation const& nameAndLocation ) : m_nameAndLocation( nameAndLocation ) {} bool operator ()( Ptr const& tracker ) { return tracker->nameAndLocation().name == m_nameAndLocation.name && tracker->nameAndLocation().location == m_nameAndLocation.location; } }; typedef std::vector > Children; NameAndLocation m_nameAndLocation; TrackerContext& m_ctx; ITracker* m_parent; Children m_children; CycleState m_runState; public: TrackerBase( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) : m_nameAndLocation( nameAndLocation ), m_ctx( ctx ), m_parent( parent ), m_runState( NotStarted ) {} virtual ~TrackerBase(); virtual NameAndLocation const& nameAndLocation() const CATCH_OVERRIDE { return m_nameAndLocation; } virtual bool isComplete() const CATCH_OVERRIDE { return m_runState == CompletedSuccessfully || m_runState == Failed; } virtual 
bool isSuccessfullyCompleted() const CATCH_OVERRIDE { return m_runState == CompletedSuccessfully; } virtual bool isOpen() const CATCH_OVERRIDE { return m_runState != NotStarted && !isComplete(); } virtual bool hasChildren() const CATCH_OVERRIDE { return !m_children.empty(); } virtual void addChild( Ptr const& child ) CATCH_OVERRIDE { m_children.push_back( child ); } virtual ITracker* findChild( NameAndLocation const& nameAndLocation ) CATCH_OVERRIDE { Children::const_iterator it = std::find_if( m_children.begin(), m_children.end(), TrackerHasName( nameAndLocation ) ); return( it != m_children.end() ) ? it->get() : CATCH_NULL; } virtual ITracker& parent() CATCH_OVERRIDE { assert( m_parent ); // Should always be non-null except for root return *m_parent; } virtual void openChild() CATCH_OVERRIDE { if( m_runState != ExecutingChildren ) { m_runState = ExecutingChildren; if( m_parent ) m_parent->openChild(); } } virtual bool isSectionTracker() const CATCH_OVERRIDE { return false; } virtual bool isIndexTracker() const CATCH_OVERRIDE { return false; } void open() { m_runState = Executing; moveToThis(); if( m_parent ) m_parent->openChild(); } virtual void close() CATCH_OVERRIDE { // Close any still open children (e.g. 
generators) while( &m_ctx.currentTracker() != this ) m_ctx.currentTracker().close(); switch( m_runState ) { case NotStarted: case CompletedSuccessfully: case Failed: throw std::logic_error( "Illogical state" ); case NeedsAnotherRun: break;; case Executing: m_runState = CompletedSuccessfully; break; case ExecutingChildren: if( m_children.empty() || m_children.back()->isComplete() ) m_runState = CompletedSuccessfully; break; default: throw std::logic_error( "Unexpected state" ); } moveToParent(); m_ctx.completeCycle(); } virtual void fail() CATCH_OVERRIDE { m_runState = Failed; if( m_parent ) m_parent->markAsNeedingAnotherRun(); moveToParent(); m_ctx.completeCycle(); } virtual void markAsNeedingAnotherRun() CATCH_OVERRIDE { m_runState = NeedsAnotherRun; } private: void moveToParent() { assert( m_parent ); m_ctx.setCurrentTracker( m_parent ); } void moveToThis() { m_ctx.setCurrentTracker( this ); } }; class SectionTracker : public TrackerBase { std::vector m_filters; public: SectionTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent ) : TrackerBase( nameAndLocation, ctx, parent ) { if( parent ) { while( !parent->isSectionTracker() ) parent = &parent->parent(); SectionTracker& parentSection = static_cast( *parent ); addNextFilters( parentSection.m_filters ); } } virtual ~SectionTracker(); virtual bool isSectionTracker() const CATCH_OVERRIDE { return true; } static SectionTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation ) { SectionTracker* section = CATCH_NULL; ITracker& currentTracker = ctx.currentTracker(); if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { assert( childTracker ); assert( childTracker->isSectionTracker() ); section = static_cast( childTracker ); } else { section = new SectionTracker( nameAndLocation, ctx, ¤tTracker ); currentTracker.addChild( section ); } if( !ctx.completedCycle() ) section->tryOpen(); return *section; } void tryOpen() { if( !isComplete() && 
(m_filters.empty() || m_filters[0].empty() || m_filters[0] == m_nameAndLocation.name ) ) open(); } void addInitialFilters( std::vector const& filters ) { if( !filters.empty() ) { m_filters.push_back(""); // Root - should never be consulted m_filters.push_back(""); // Test Case - not a section filter m_filters.insert( m_filters.end(), filters.begin(), filters.end() ); } } void addNextFilters( std::vector const& filters ) { if( filters.size() > 1 ) m_filters.insert( m_filters.end(), ++filters.begin(), filters.end() ); } }; class IndexTracker : public TrackerBase { int m_size; int m_index; public: IndexTracker( NameAndLocation const& nameAndLocation, TrackerContext& ctx, ITracker* parent, int size ) : TrackerBase( nameAndLocation, ctx, parent ), m_size( size ), m_index( -1 ) {} virtual ~IndexTracker(); virtual bool isIndexTracker() const CATCH_OVERRIDE { return true; } static IndexTracker& acquire( TrackerContext& ctx, NameAndLocation const& nameAndLocation, int size ) { IndexTracker* tracker = CATCH_NULL; ITracker& currentTracker = ctx.currentTracker(); if( ITracker* childTracker = currentTracker.findChild( nameAndLocation ) ) { assert( childTracker ); assert( childTracker->isIndexTracker() ); tracker = static_cast( childTracker ); } else { tracker = new IndexTracker( nameAndLocation, ctx, ¤tTracker, size ); currentTracker.addChild( tracker ); } if( !ctx.completedCycle() && !tracker->isComplete() ) { if( tracker->m_runState != ExecutingChildren && tracker->m_runState != NeedsAnotherRun ) tracker->moveNext(); tracker->open(); } return *tracker; } int index() const { return m_index; } void moveNext() { m_index++; m_children.clear(); } virtual void close() CATCH_OVERRIDE { TrackerBase::close(); if( m_runState == CompletedSuccessfully && m_index < m_size-1 ) m_runState = Executing; } }; inline ITracker& TrackerContext::startRun() { m_rootTracker = new SectionTracker( NameAndLocation( "{root}", CATCH_INTERNAL_LINEINFO ), *this, CATCH_NULL ); m_currentTracker = CATCH_NULL; 
m_runState = Executing; return *m_rootTracker; } } // namespace TestCaseTracking using TestCaseTracking::ITracker; using TestCaseTracking::TrackerContext; using TestCaseTracking::SectionTracker; using TestCaseTracking::IndexTracker; } // namespace Catch CATCH_INTERNAL_UNSUPPRESS_ETD_WARNINGS // #included from: catch_fatal_condition.hpp #define TWOBLUECUBES_CATCH_FATAL_CONDITION_H_INCLUDED namespace Catch { // Report the error condition inline void reportFatal( std::string const& message ) { IContext& context = Catch::getCurrentContext(); IResultCapture* resultCapture = context.getResultCapture(); resultCapture->handleFatalErrorCondition( message ); } } // namespace Catch #if defined ( CATCH_PLATFORM_WINDOWS ) ///////////////////////////////////////// // #included from: catch_windows_h_proxy.h #define TWOBLUECUBES_CATCH_WINDOWS_H_PROXY_H_INCLUDED #ifdef CATCH_DEFINES_NOMINMAX # define NOMINMAX #endif #ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN #endif #ifdef __AFXDLL #include #else #include #endif #ifdef CATCH_DEFINES_NOMINMAX # undef NOMINMAX #endif #ifdef CATCH_DEFINES_WIN32_LEAN_AND_MEAN # undef WIN32_LEAN_AND_MEAN #endif # if !defined ( CATCH_CONFIG_WINDOWS_SEH ) namespace Catch { struct FatalConditionHandler { void reset() {} }; } # else // CATCH_CONFIG_WINDOWS_SEH is defined namespace Catch { struct SignalDefs { DWORD id; const char* name; }; extern SignalDefs signalDefs[]; // There is no 1-1 mapping between signals and windows exceptions. // Windows can easily distinguish between SO and SigSegV, // but SigInt, SigTerm, etc are handled differently. 
SignalDefs signalDefs[] = { { EXCEPTION_ILLEGAL_INSTRUCTION, "SIGILL - Illegal instruction signal" }, { EXCEPTION_STACK_OVERFLOW, "SIGSEGV - Stack overflow" }, { EXCEPTION_ACCESS_VIOLATION, "SIGSEGV - Segmentation violation signal" }, { EXCEPTION_INT_DIVIDE_BY_ZERO, "Divide by zero error" }, }; struct FatalConditionHandler { static LONG CALLBACK handleVectoredException(PEXCEPTION_POINTERS ExceptionInfo) { for (int i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { if (ExceptionInfo->ExceptionRecord->ExceptionCode == signalDefs[i].id) { reportFatal(signalDefs[i].name); } } // If its not an exception we care about, pass it along. // This stops us from eating debugger breaks etc. return EXCEPTION_CONTINUE_SEARCH; } FatalConditionHandler() { isSet = true; // 32k seems enough for Catch to handle stack overflow, // but the value was found experimentally, so there is no strong guarantee guaranteeSize = 32 * 1024; exceptionHandlerHandle = CATCH_NULL; // Register as first handler in current chain exceptionHandlerHandle = AddVectoredExceptionHandler(1, handleVectoredException); // Pass in guarantee size to be filled SetThreadStackGuarantee(&guaranteeSize); } static void reset() { if (isSet) { // Unregister handler and restore the old guarantee RemoveVectoredExceptionHandler(exceptionHandlerHandle); SetThreadStackGuarantee(&guaranteeSize); exceptionHandlerHandle = CATCH_NULL; isSet = false; } } ~FatalConditionHandler() { reset(); } private: static bool isSet; static ULONG guaranteeSize; static PVOID exceptionHandlerHandle; }; bool FatalConditionHandler::isSet = false; ULONG FatalConditionHandler::guaranteeSize = 0; PVOID FatalConditionHandler::exceptionHandlerHandle = CATCH_NULL; } // namespace Catch # endif // CATCH_CONFIG_WINDOWS_SEH #else // Not Windows - assumed to be POSIX compatible ////////////////////////// # if !defined(CATCH_CONFIG_POSIX_SIGNALS) namespace Catch { struct FatalConditionHandler { void reset() {} }; } # else // CATCH_CONFIG_POSIX_SIGNALS is 
defined #include namespace Catch { struct SignalDefs { int id; const char* name; }; extern SignalDefs signalDefs[]; SignalDefs signalDefs[] = { { SIGINT, "SIGINT - Terminal interrupt signal" }, { SIGILL, "SIGILL - Illegal instruction signal" }, { SIGFPE, "SIGFPE - Floating point error signal" }, { SIGSEGV, "SIGSEGV - Segmentation violation signal" }, { SIGTERM, "SIGTERM - Termination request signal" }, { SIGABRT, "SIGABRT - Abort (abnormal termination) signal" } }; struct FatalConditionHandler { static bool isSet; static struct sigaction oldSigActions [sizeof(signalDefs)/sizeof(SignalDefs)]; static stack_t oldSigStack; static char altStackMem[32768]; static void handleSignal( int sig ) { std::string name = ""; for (std::size_t i = 0; i < sizeof(signalDefs) / sizeof(SignalDefs); ++i) { SignalDefs &def = signalDefs[i]; if (sig == def.id) { name = def.name; break; } } reset(); reportFatal(name); raise( sig ); } FatalConditionHandler() { isSet = true; stack_t sigStack; sigStack.ss_sp = altStackMem; sigStack.ss_size = 32768; sigStack.ss_flags = 0; sigaltstack(&sigStack, &oldSigStack); struct sigaction sa = { 0 }; sa.sa_handler = handleSignal; sa.sa_flags = SA_ONSTACK; for (std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i) { sigaction(signalDefs[i].id, &sa, &oldSigActions[i]); } } ~FatalConditionHandler() { reset(); } static void reset() { if( isSet ) { // Set signals back to previous values -- hopefully nobody overwrote them in the meantime for( std::size_t i = 0; i < sizeof(signalDefs)/sizeof(SignalDefs); ++i ) { sigaction(signalDefs[i].id, &oldSigActions[i], CATCH_NULL); } // Return the old stack sigaltstack(&oldSigStack, CATCH_NULL); isSet = false; } } }; bool FatalConditionHandler::isSet = false; struct sigaction FatalConditionHandler::oldSigActions[sizeof(signalDefs)/sizeof(SignalDefs)] = {}; stack_t FatalConditionHandler::oldSigStack = {}; char FatalConditionHandler::altStackMem[32768] = {}; } // namespace Catch # endif // 
CATCH_CONFIG_POSIX_SIGNALS #endif // not Windows #include #include namespace Catch { class StreamRedirect { public: StreamRedirect( std::ostream& stream, std::string& targetString ) : m_stream( stream ), m_prevBuf( stream.rdbuf() ), m_targetString( targetString ) { stream.rdbuf( m_oss.rdbuf() ); } ~StreamRedirect() { m_targetString += m_oss.str(); m_stream.rdbuf( m_prevBuf ); } private: std::ostream& m_stream; std::streambuf* m_prevBuf; std::ostringstream m_oss; std::string& m_targetString; }; /////////////////////////////////////////////////////////////////////////// class RunContext : public IResultCapture, public IRunner { RunContext( RunContext const& ); void operator =( RunContext const& ); public: explicit RunContext( Ptr const& _config, Ptr const& reporter ) : m_runInfo( _config->name() ), m_context( getCurrentMutableContext() ), m_activeTestCase( CATCH_NULL ), m_config( _config ), m_reporter( reporter ), m_shouldReportUnexpected ( true ) { m_context.setRunner( this ); m_context.setConfig( m_config ); m_context.setResultCapture( this ); m_reporter->testRunStarting( m_runInfo ); } virtual ~RunContext() { m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, aborting() ) ); } void testGroupStarting( std::string const& testSpec, std::size_t groupIndex, std::size_t groupsCount ) { m_reporter->testGroupStarting( GroupInfo( testSpec, groupIndex, groupsCount ) ); } void testGroupEnded( std::string const& testSpec, Totals const& totals, std::size_t groupIndex, std::size_t groupsCount ) { m_reporter->testGroupEnded( TestGroupStats( GroupInfo( testSpec, groupIndex, groupsCount ), totals, aborting() ) ); } Totals runTest( TestCase const& testCase ) { Totals prevTotals = m_totals; std::string redirectedCout; std::string redirectedCerr; TestCaseInfo testInfo = testCase.getTestCaseInfo(); m_reporter->testCaseStarting( testInfo ); m_activeTestCase = &testCase; do { ITracker& rootTracker = m_trackerContext.startRun(); assert( rootTracker.isSectionTracker() ); 
static_cast( rootTracker ).addInitialFilters( m_config->getSectionsToRun() ); do { m_trackerContext.startCycle(); m_testCaseTracker = &SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( testInfo.name, testInfo.lineInfo ) ); runCurrentTest( redirectedCout, redirectedCerr ); } while( !m_testCaseTracker->isSuccessfullyCompleted() && !aborting() ); } // !TBD: deprecated - this will be replaced by indexed trackers while( getCurrentContext().advanceGeneratorsForCurrentTest() && !aborting() ); Totals deltaTotals = m_totals.delta( prevTotals ); if( testInfo.expectedToFail() && deltaTotals.testCases.passed > 0 ) { deltaTotals.assertions.failed++; deltaTotals.testCases.passed--; deltaTotals.testCases.failed++; } m_totals.testCases += deltaTotals.testCases; m_reporter->testCaseEnded( TestCaseStats( testInfo, deltaTotals, redirectedCout, redirectedCerr, aborting() ) ); m_activeTestCase = CATCH_NULL; m_testCaseTracker = CATCH_NULL; return deltaTotals; } Ptr config() const { return m_config; } private: // IResultCapture virtual void assertionEnded( AssertionResult const& result ) { if( result.getResultType() == ResultWas::Ok ) { m_totals.assertions.passed++; } else if( !result.isOk() ) { m_totals.assertions.failed++; } // We have no use for the return value (whether messages should be cleared), because messages were made scoped // and should be let to clear themselves out. 
static_cast(m_reporter->assertionEnded(AssertionStats(result, m_messages, m_totals))); // Reset working state m_lastAssertionInfo = AssertionInfo( "", m_lastAssertionInfo.lineInfo, "{Unknown expression after the reported line}" , m_lastAssertionInfo.resultDisposition ); m_lastResult = result; } virtual bool sectionStarted ( SectionInfo const& sectionInfo, Counts& assertions ) { ITracker& sectionTracker = SectionTracker::acquire( m_trackerContext, TestCaseTracking::NameAndLocation( sectionInfo.name, sectionInfo.lineInfo ) ); if( !sectionTracker.isOpen() ) return false; m_activeSections.push_back( §ionTracker ); m_lastAssertionInfo.lineInfo = sectionInfo.lineInfo; m_reporter->sectionStarting( sectionInfo ); assertions = m_totals.assertions; return true; } bool testForMissingAssertions( Counts& assertions ) { if( assertions.total() != 0 ) return false; if( !m_config->warnAboutMissingAssertions() ) return false; if( m_trackerContext.currentTracker().hasChildren() ) return false; m_totals.assertions.failed++; assertions.failed++; return true; } virtual void sectionEnded( SectionEndInfo const& endInfo ) { Counts assertions = m_totals.assertions - endInfo.prevAssertions; bool missingAssertions = testForMissingAssertions( assertions ); if( !m_activeSections.empty() ) { m_activeSections.back()->close(); m_activeSections.pop_back(); } m_reporter->sectionEnded( SectionStats( endInfo.sectionInfo, assertions, endInfo.durationInSeconds, missingAssertions ) ); m_messages.clear(); } virtual void sectionEndedEarly( SectionEndInfo const& endInfo ) { if( m_unfinishedSections.empty() ) m_activeSections.back()->fail(); else m_activeSections.back()->close(); m_activeSections.pop_back(); m_unfinishedSections.push_back( endInfo ); } virtual void pushScopedMessage( MessageInfo const& message ) { m_messages.push_back( message ); } virtual void popScopedMessage( MessageInfo const& message ) { m_messages.erase( std::remove( m_messages.begin(), m_messages.end(), message ), m_messages.end() ); 
} virtual std::string getCurrentTestName() const { return m_activeTestCase ? m_activeTestCase->getTestCaseInfo().name : std::string(); } virtual const AssertionResult* getLastResult() const { return &m_lastResult; } virtual void exceptionEarlyReported() { m_shouldReportUnexpected = false; } virtual void handleFatalErrorCondition( std::string const& message ) { // Don't rebuild the result -- the stringification itself can cause more fatal errors // Instead, fake a result data. AssertionResultData tempResult; tempResult.resultType = ResultWas::FatalErrorCondition; tempResult.message = message; AssertionResult result(m_lastAssertionInfo, tempResult); getResultCapture().assertionEnded(result); handleUnfinishedSections(); // Recreate section for test case (as we will lose the one that was in scope) TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); Counts assertions; assertions.failed = 1; SectionStats testCaseSectionStats( testCaseSection, assertions, 0, false ); m_reporter->sectionEnded( testCaseSectionStats ); TestCaseInfo testInfo = m_activeTestCase->getTestCaseInfo(); Totals deltaTotals; deltaTotals.testCases.failed = 1; m_reporter->testCaseEnded( TestCaseStats( testInfo, deltaTotals, std::string(), std::string(), false ) ); m_totals.testCases.failed++; testGroupEnded( std::string(), m_totals, 1, 1 ); m_reporter->testRunEnded( TestRunStats( m_runInfo, m_totals, false ) ); } public: // !TBD We need to do this another way! 
bool aborting() const { return m_totals.assertions.failed == static_cast( m_config->abortAfter() ); } private: void runCurrentTest( std::string& redirectedCout, std::string& redirectedCerr ) { TestCaseInfo const& testCaseInfo = m_activeTestCase->getTestCaseInfo(); SectionInfo testCaseSection( testCaseInfo.lineInfo, testCaseInfo.name, testCaseInfo.description ); m_reporter->sectionStarting( testCaseSection ); Counts prevAssertions = m_totals.assertions; double duration = 0; m_shouldReportUnexpected = true; try { m_lastAssertionInfo = AssertionInfo( "TEST_CASE", testCaseInfo.lineInfo, "", ResultDisposition::Normal ); seedRng( *m_config ); Timer timer; timer.start(); if( m_reporter->getPreferences().shouldRedirectStdOut ) { StreamRedirect coutRedir( Catch::cout(), redirectedCout ); StreamRedirect cerrRedir( Catch::cerr(), redirectedCerr ); invokeActiveTestCase(); } else { invokeActiveTestCase(); } duration = timer.getElapsedSeconds(); } catch( TestFailureException& ) { // This just means the test was aborted due to failure } catch(...) { // Under CATCH_CONFIG_FAST_COMPILE, unexpected exceptions under REQUIRE assertions // are reported without translation at the point of origin. 
if (m_shouldReportUnexpected) { makeUnexpectedResultBuilder().useActiveException(); } } m_testCaseTracker->close(); handleUnfinishedSections(); m_messages.clear(); Counts assertions = m_totals.assertions - prevAssertions; bool missingAssertions = testForMissingAssertions( assertions ); if( testCaseInfo.okToFail() ) { std::swap( assertions.failedButOk, assertions.failed ); m_totals.assertions.failed -= assertions.failedButOk; m_totals.assertions.failedButOk += assertions.failedButOk; } SectionStats testCaseSectionStats( testCaseSection, assertions, duration, missingAssertions ); m_reporter->sectionEnded( testCaseSectionStats ); } void invokeActiveTestCase() { FatalConditionHandler fatalConditionHandler; // Handle signals m_activeTestCase->invoke(); fatalConditionHandler.reset(); } private: ResultBuilder makeUnexpectedResultBuilder() const { return ResultBuilder( m_lastAssertionInfo.macroName, m_lastAssertionInfo.lineInfo, m_lastAssertionInfo.capturedExpression, m_lastAssertionInfo.resultDisposition ); } void handleUnfinishedSections() { // If sections ended prematurely due to an exception we stored their // infos here so we can tear them down outside the unwind process. 
for( std::vector::const_reverse_iterator it = m_unfinishedSections.rbegin(), itEnd = m_unfinishedSections.rend(); it != itEnd; ++it ) sectionEnded( *it ); m_unfinishedSections.clear(); } TestRunInfo m_runInfo; IMutableContext& m_context; TestCase const* m_activeTestCase; ITracker* m_testCaseTracker; ITracker* m_currentSectionTracker; AssertionResult m_lastResult; Ptr m_config; Totals m_totals; Ptr m_reporter; std::vector m_messages; AssertionInfo m_lastAssertionInfo; std::vector m_unfinishedSections; std::vector m_activeSections; TrackerContext m_trackerContext; bool m_shouldReportUnexpected; }; IResultCapture& getResultCapture() { if( IResultCapture* capture = getCurrentContext().getResultCapture() ) return *capture; else throw std::logic_error( "No result capture instance" ); } } // end namespace Catch // #included from: internal/catch_version.h #define TWOBLUECUBES_CATCH_VERSION_H_INCLUDED namespace Catch { // Versioning information struct Version { Version( unsigned int _majorVersion, unsigned int _minorVersion, unsigned int _patchNumber, char const * const _branchName, unsigned int _buildNumber ); unsigned int const majorVersion; unsigned int const minorVersion; unsigned int const patchNumber; // buildNumber is only used if branchName is not null char const * const branchName; unsigned int const buildNumber; friend std::ostream& operator << ( std::ostream& os, Version const& version ); private: void operator=( Version const& ); }; inline Version libraryVersion(); } #include #include #include namespace Catch { Ptr createReporter( std::string const& reporterName, Ptr const& config ) { Ptr reporter = getRegistryHub().getReporterRegistry().create( reporterName, config.get() ); if( !reporter ) { std::ostringstream oss; oss << "No reporter registered with name: '" << reporterName << "'"; throw std::domain_error( oss.str() ); } return reporter; } Ptr makeReporter( Ptr const& config ) { std::vector reporters = config->getReporterNames(); if( reporters.empty() ) 
reporters.push_back( "console" ); Ptr reporter; for( std::vector::const_iterator it = reporters.begin(), itEnd = reporters.end(); it != itEnd; ++it ) reporter = addReporter( reporter, createReporter( *it, config ) ); return reporter; } Ptr addListeners( Ptr const& config, Ptr reporters ) { IReporterRegistry::Listeners listeners = getRegistryHub().getReporterRegistry().getListeners(); for( IReporterRegistry::Listeners::const_iterator it = listeners.begin(), itEnd = listeners.end(); it != itEnd; ++it ) reporters = addReporter(reporters, (*it)->create( ReporterConfig( config ) ) ); return reporters; } Totals runTests( Ptr const& config ) { Ptr iconfig = config.get(); Ptr reporter = makeReporter( config ); reporter = addListeners( iconfig, reporter ); RunContext context( iconfig, reporter ); Totals totals; context.testGroupStarting( config->name(), 1, 1 ); TestSpec testSpec = config->testSpec(); if( !testSpec.hasFilters() ) testSpec = TestSpecParser( ITagAliasRegistry::get() ).parse( "~[.]" ).testSpec(); // All not hidden tests std::vector const& allTestCases = getAllTestCasesSorted( *iconfig ); for( std::vector::const_iterator it = allTestCases.begin(), itEnd = allTestCases.end(); it != itEnd; ++it ) { if( !context.aborting() && matchTest( *it, testSpec, *iconfig ) ) totals += context.runTest( *it ); else reporter->skipTest( *it ); } context.testGroupEnded( iconfig->name(), totals, 1, 1 ); return totals; } void applyFilenamesAsTags( IConfig const& config ) { std::vector const& tests = getAllTestCasesSorted( config ); for(std::size_t i = 0; i < tests.size(); ++i ) { TestCase& test = const_cast( tests[i] ); std::set tags = test.tags; std::string filename = test.lineInfo.file; std::string::size_type lastSlash = filename.find_last_of( "\\/" ); if( lastSlash != std::string::npos ) filename = filename.substr( lastSlash+1 ); std::string::size_type lastDot = filename.find_last_of( "." 
); if( lastDot != std::string::npos ) filename = filename.substr( 0, lastDot ); tags.insert( "#" + filename ); setTags( test, tags ); } } class Session : NonCopyable { static bool alreadyInstantiated; public: struct OnUnusedOptions { enum DoWhat { Ignore, Fail }; }; Session() : m_cli( makeCommandLineParser() ) { if( alreadyInstantiated ) { std::string msg = "Only one instance of Catch::Session can ever be used"; Catch::cerr() << msg << std::endl; throw std::logic_error( msg ); } alreadyInstantiated = true; } ~Session() { Catch::cleanUp(); } void showHelp( std::string const& processName ) { Catch::cout() << "\nCatch v" << libraryVersion() << "\n"; m_cli.usage( Catch::cout(), processName ); Catch::cout() << "For more detail usage please see the project docs\n" << std::endl; } int applyCommandLine( int argc, char const* const* const argv, OnUnusedOptions::DoWhat unusedOptionBehaviour = OnUnusedOptions::Fail ) { try { m_cli.setThrowOnUnrecognisedTokens( unusedOptionBehaviour == OnUnusedOptions::Fail ); m_unusedTokens = m_cli.parseInto( Clara::argsToVector( argc, argv ), m_configData ); if( m_configData.showHelp ) showHelp( m_configData.processName ); m_config.reset(); } catch( std::exception& ex ) { { Colour colourGuard( Colour::Red ); Catch::cerr() << "\nError(s) in input:\n" << Text( ex.what(), TextAttributes().setIndent(2) ) << "\n\n"; } m_cli.usage( Catch::cout(), m_configData.processName ); return (std::numeric_limits::max)(); } return 0; } void useConfigData( ConfigData const& _configData ) { m_configData = _configData; m_config.reset(); } int run( int argc, char const* const* const argv ) { int returnCode = applyCommandLine( argc, argv ); if( returnCode == 0 ) returnCode = run(); return returnCode; } #if defined(WIN32) && defined(UNICODE) int run( int argc, wchar_t const* const* const argv ) { char **utf8Argv = new char *[ argc ]; for ( int i = 0; i < argc; ++i ) { int bufSize = WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, NULL, 0, NULL, NULL ); utf8Argv[ i ] = 
new char[ bufSize ]; WideCharToMultiByte( CP_UTF8, 0, argv[i], -1, utf8Argv[i], bufSize, NULL, NULL ); } int returnCode = applyCommandLine( argc, utf8Argv ); if( returnCode == 0 ) returnCode = run(); for ( int i = 0; i < argc; ++i ) delete [] utf8Argv[ i ]; delete [] utf8Argv; return returnCode; } #endif int run() { if( m_configData.showHelp ) return 0; try { config(); // Force config to be constructed seedRng( *m_config ); if( m_configData.filenamesAsTags ) applyFilenamesAsTags( *m_config ); // Handle list request if( Option listed = list( config() ) ) return static_cast( *listed ); return static_cast( runTests( m_config ).assertions.failed ); } catch( std::exception& ex ) { Catch::cerr() << ex.what() << std::endl; return (std::numeric_limits::max)(); } } Clara::CommandLine const& cli() const { return m_cli; } std::vector const& unusedTokens() const { return m_unusedTokens; } ConfigData& configData() { return m_configData; } Config& config() { if( !m_config ) m_config = new Config( m_configData ); return *m_config; } private: Clara::CommandLine m_cli; std::vector m_unusedTokens; ConfigData m_configData; Ptr m_config; }; bool Session::alreadyInstantiated = false; } // end namespace Catch // #included from: catch_registry_hub.hpp #define TWOBLUECUBES_CATCH_REGISTRY_HUB_HPP_INCLUDED // #included from: catch_test_case_registry_impl.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_REGISTRY_IMPL_HPP_INCLUDED #include #include #include #include #ifdef CATCH_CONFIG_CPP11_SHUFFLE #include #endif namespace Catch { struct RandomNumberGenerator { template static void shuffle( V& vector ) { #ifdef CATCH_CONFIG_CPP11_SHUFFLE std::random_device device; std::mt19937 rng( device() ); std::shuffle( vector.begin(), vector.end(), rng ); #else std::random_shuffle( vector.begin(), vector.end() ); #endif } }; inline std::vector sortTests( IConfig const& config, std::vector const& unsortedTestCases ) { std::vector sorted = unsortedTestCases; switch( config.runOrder() ) { case 
RunTests::InLexicographicalOrder: std::sort( sorted.begin(), sorted.end() ); break; case RunTests::InRandomOrder: { seedRng( config ); RandomNumberGenerator::shuffle( sorted ); } break; case RunTests::InDeclarationOrder: // already in declaration order break; } return sorted; } bool matchTest( TestCase const& testCase, TestSpec const& testSpec, IConfig const& config ) { return testSpec.matches( testCase ) && ( config.allowThrows() || !testCase.throws() ); } void enforceNoDuplicateTestCases( std::vector const& functions ) { std::set seenFunctions; for( std::vector::const_iterator it = functions.begin(), itEnd = functions.end(); it != itEnd; ++it ) { std::pair::const_iterator, bool> prev = seenFunctions.insert( *it ); if( !prev.second ) { std::ostringstream ss; ss << Colour( Colour::Red ) << "error: TEST_CASE( \"" << it->name << "\" ) already defined.\n" << "\tFirst seen at " << prev.first->getTestCaseInfo().lineInfo << '\n' << "\tRedefined at " << it->getTestCaseInfo().lineInfo << std::endl; throw std::runtime_error(ss.str()); } } } std::vector filterTests( std::vector const& testCases, TestSpec const& testSpec, IConfig const& config ) { std::vector filtered; filtered.reserve( testCases.size() ); for( std::vector::const_iterator it = testCases.begin(), itEnd = testCases.end(); it != itEnd; ++it ) if( matchTest( *it, testSpec, config ) ) filtered.push_back( *it ); return filtered; } std::vector const& getAllTestCasesSorted( IConfig const& config ) { return getRegistryHub().getTestCaseRegistry().getAllTestsSorted( config ); } class TestRegistry : public ITestCaseRegistry { public: TestRegistry() : m_currentSortOrder( RunTests::InDeclarationOrder ), m_unnamedCount( 0 ) {} virtual ~TestRegistry(); virtual void registerTest( TestCase const& testCase ) { std::string name = testCase.getTestCaseInfo().name; if( name.empty() ) { std::ostringstream oss; oss << "Anonymous test case " << ++m_unnamedCount; return registerTest( testCase.withName( oss.str() ) ); } 
m_functions.push_back( testCase ); } virtual std::vector const& getAllTests() const { return m_functions; } virtual std::vector const& getAllTestsSorted( IConfig const& config ) const { if( m_sortedFunctions.empty() ) enforceNoDuplicateTestCases( m_functions ); if( m_currentSortOrder != config.runOrder() || m_sortedFunctions.empty() ) { m_sortedFunctions = sortTests( config, m_functions ); m_currentSortOrder = config.runOrder(); } return m_sortedFunctions; } private: std::vector m_functions; mutable RunTests::InWhatOrder m_currentSortOrder; mutable std::vector m_sortedFunctions; size_t m_unnamedCount; std::ios_base::Init m_ostreamInit; // Forces cout/ cerr to be initialised }; /////////////////////////////////////////////////////////////////////////// class FreeFunctionTestCase : public SharedImpl { public: FreeFunctionTestCase( TestFunction fun ) : m_fun( fun ) {} virtual void invoke() const { m_fun(); } private: virtual ~FreeFunctionTestCase(); TestFunction m_fun; }; inline std::string extractClassName( std::string const& classOrQualifiedMethodName ) { std::string className = classOrQualifiedMethodName; if( startsWith( className, '&' ) ) { std::size_t lastColons = className.rfind( "::" ); std::size_t penultimateColons = className.rfind( "::", lastColons-1 ); if( penultimateColons == std::string::npos ) penultimateColons = 1; className = className.substr( penultimateColons, lastColons-penultimateColons ); } return className; } void registerTestCase ( ITestCase* testCase, char const* classOrQualifiedMethodName, NameAndDesc const& nameAndDesc, SourceLineInfo const& lineInfo ) { getMutableRegistryHub().registerTest ( makeTestCase ( testCase, extractClassName( classOrQualifiedMethodName ), nameAndDesc.name, nameAndDesc.description, lineInfo ) ); } void registerTestCaseFunction ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ) { registerTestCase( new FreeFunctionTestCase( function ), "", nameAndDesc, lineInfo ); } 
/////////////////////////////////////////////////////////////////////////// AutoReg::AutoReg ( TestFunction function, SourceLineInfo const& lineInfo, NameAndDesc const& nameAndDesc ) { registerTestCaseFunction( function, lineInfo, nameAndDesc ); } AutoReg::~AutoReg() {} } // end namespace Catch // #included from: catch_reporter_registry.hpp #define TWOBLUECUBES_CATCH_REPORTER_REGISTRY_HPP_INCLUDED #include namespace Catch { class ReporterRegistry : public IReporterRegistry { public: virtual ~ReporterRegistry() CATCH_OVERRIDE {} virtual IStreamingReporter* create( std::string const& name, Ptr const& config ) const CATCH_OVERRIDE { FactoryMap::const_iterator it = m_factories.find( name ); if( it == m_factories.end() ) return CATCH_NULL; return it->second->create( ReporterConfig( config ) ); } void registerReporter( std::string const& name, Ptr const& factory ) { m_factories.insert( std::make_pair( name, factory ) ); } void registerListener( Ptr const& factory ) { m_listeners.push_back( factory ); } virtual FactoryMap const& getFactories() const CATCH_OVERRIDE { return m_factories; } virtual Listeners const& getListeners() const CATCH_OVERRIDE { return m_listeners; } private: FactoryMap m_factories; Listeners m_listeners; }; } // #included from: catch_exception_translator_registry.hpp #define TWOBLUECUBES_CATCH_EXCEPTION_TRANSLATOR_REGISTRY_HPP_INCLUDED #ifdef __OBJC__ #import "Foundation/Foundation.h" #endif namespace Catch { class ExceptionTranslatorRegistry : public IExceptionTranslatorRegistry { public: ~ExceptionTranslatorRegistry() { deleteAll( m_translators ); } virtual void registerTranslator( const IExceptionTranslator* translator ) { m_translators.push_back( translator ); } virtual std::string translateActiveException() const { try { #ifdef __OBJC__ // In Objective-C try objective-c exceptions first @try { return tryTranslators(); } @catch (NSException *exception) { return Catch::toString( [exception description] ); } #else return tryTranslators(); #endif } 
catch( TestFailureException& ) { throw; } catch( std::exception& ex ) { return ex.what(); } catch( std::string& msg ) { return msg; } catch( const char* msg ) { return msg; } catch(...) { return "Unknown exception"; } } std::string tryTranslators() const { if( m_translators.empty() ) throw; else return m_translators[0]->translate( m_translators.begin()+1, m_translators.end() ); } private: std::vector m_translators; }; } // #included from: catch_tag_alias_registry.h #define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_H_INCLUDED #include namespace Catch { class TagAliasRegistry : public ITagAliasRegistry { public: virtual ~TagAliasRegistry(); virtual Option find( std::string const& alias ) const; virtual std::string expandAliases( std::string const& unexpandedTestSpec ) const; void add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ); private: std::map m_registry; }; } // end namespace Catch namespace Catch { namespace { class RegistryHub : public IRegistryHub, public IMutableRegistryHub { RegistryHub( RegistryHub const& ); void operator=( RegistryHub const& ); public: // IRegistryHub RegistryHub() { } virtual IReporterRegistry const& getReporterRegistry() const CATCH_OVERRIDE { return m_reporterRegistry; } virtual ITestCaseRegistry const& getTestCaseRegistry() const CATCH_OVERRIDE { return m_testCaseRegistry; } virtual IExceptionTranslatorRegistry& getExceptionTranslatorRegistry() CATCH_OVERRIDE { return m_exceptionTranslatorRegistry; } virtual ITagAliasRegistry const& getTagAliasRegistry() const CATCH_OVERRIDE { return m_tagAliasRegistry; } public: // IMutableRegistryHub virtual void registerReporter( std::string const& name, Ptr const& factory ) CATCH_OVERRIDE { m_reporterRegistry.registerReporter( name, factory ); } virtual void registerListener( Ptr const& factory ) CATCH_OVERRIDE { m_reporterRegistry.registerListener( factory ); } virtual void registerTest( TestCase const& testInfo ) CATCH_OVERRIDE { m_testCaseRegistry.registerTest( 
testInfo ); } virtual void registerTranslator( const IExceptionTranslator* translator ) CATCH_OVERRIDE { m_exceptionTranslatorRegistry.registerTranslator( translator ); } virtual void registerTagAlias( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) CATCH_OVERRIDE { m_tagAliasRegistry.add( alias, tag, lineInfo ); } private: TestRegistry m_testCaseRegistry; ReporterRegistry m_reporterRegistry; ExceptionTranslatorRegistry m_exceptionTranslatorRegistry; TagAliasRegistry m_tagAliasRegistry; }; // Single, global, instance inline RegistryHub*& getTheRegistryHub() { static RegistryHub* theRegistryHub = CATCH_NULL; if( !theRegistryHub ) theRegistryHub = new RegistryHub(); return theRegistryHub; } } IRegistryHub& getRegistryHub() { return *getTheRegistryHub(); } IMutableRegistryHub& getMutableRegistryHub() { return *getTheRegistryHub(); } void cleanUp() { delete getTheRegistryHub(); getTheRegistryHub() = CATCH_NULL; cleanUpContext(); } std::string translateActiveException() { return getRegistryHub().getExceptionTranslatorRegistry().translateActiveException(); } } // end namespace Catch // #included from: catch_notimplemented_exception.hpp #define TWOBLUECUBES_CATCH_NOTIMPLEMENTED_EXCEPTION_HPP_INCLUDED #include namespace Catch { NotImplementedException::NotImplementedException( SourceLineInfo const& lineInfo ) : m_lineInfo( lineInfo ) { std::ostringstream oss; oss << lineInfo << ": function "; oss << "not implemented"; m_what = oss.str(); } const char* NotImplementedException::what() const CATCH_NOEXCEPT { return m_what.c_str(); } } // end namespace Catch // #included from: catch_context_impl.hpp #define TWOBLUECUBES_CATCH_CONTEXT_IMPL_HPP_INCLUDED // #included from: catch_stream.hpp #define TWOBLUECUBES_CATCH_STREAM_HPP_INCLUDED #include #include #include namespace Catch { template class StreamBufImpl : public StreamBufBase { char data[bufferSize]; WriterF m_writer; public: StreamBufImpl() { setp( data, data + sizeof(data) ); } 
~StreamBufImpl() CATCH_NOEXCEPT { sync(); } private: int overflow( int c ) { sync(); if( c != EOF ) { if( pbase() == epptr() ) m_writer( std::string( 1, static_cast( c ) ) ); else sputc( static_cast( c ) ); } return 0; } int sync() { if( pbase() != pptr() ) { m_writer( std::string( pbase(), static_cast( pptr() - pbase() ) ) ); setp( pbase(), epptr() ); } return 0; } }; /////////////////////////////////////////////////////////////////////////// FileStream::FileStream( std::string const& filename ) { m_ofs.open( filename.c_str() ); if( m_ofs.fail() ) { std::ostringstream oss; oss << "Unable to open file: '" << filename << '\''; throw std::domain_error( oss.str() ); } } std::ostream& FileStream::stream() const { return m_ofs; } struct OutputDebugWriter { void operator()( std::string const&str ) { writeToDebugConsole( str ); } }; DebugOutStream::DebugOutStream() : m_streamBuf( new StreamBufImpl() ), m_os( m_streamBuf.get() ) {} std::ostream& DebugOutStream::stream() const { return m_os; } // Store the streambuf from cout up-front because // cout may get redirected when running tests CoutStream::CoutStream() : m_os( Catch::cout().rdbuf() ) {} std::ostream& CoutStream::stream() const { return m_os; } #ifndef CATCH_CONFIG_NOSTDOUT // If you #define this you must implement these functions std::ostream& cout() { return std::cout; } std::ostream& cerr() { return std::cerr; } #endif } namespace Catch { class Context : public IMutableContext { Context() : m_config( CATCH_NULL ), m_runner( CATCH_NULL ), m_resultCapture( CATCH_NULL ) {} Context( Context const& ); void operator=( Context const& ); public: virtual ~Context() { deleteAllValues( m_generatorsByTestName ); } public: // IContext virtual IResultCapture* getResultCapture() { return m_resultCapture; } virtual IRunner* getRunner() { return m_runner; } virtual size_t getGeneratorIndex( std::string const& fileInfo, size_t totalSize ) { return getGeneratorsForCurrentTest() .getGeneratorInfo( fileInfo, totalSize ) 
.getCurrentIndex(); } virtual bool advanceGeneratorsForCurrentTest() { IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); return generators && generators->moveNext(); } virtual Ptr getConfig() const { return m_config; } public: // IMutableContext virtual void setResultCapture( IResultCapture* resultCapture ) { m_resultCapture = resultCapture; } virtual void setRunner( IRunner* runner ) { m_runner = runner; } virtual void setConfig( Ptr const& config ) { m_config = config; } friend IMutableContext& getCurrentMutableContext(); private: IGeneratorsForTest* findGeneratorsForCurrentTest() { std::string testName = getResultCapture()->getCurrentTestName(); std::map::const_iterator it = m_generatorsByTestName.find( testName ); return it != m_generatorsByTestName.end() ? it->second : CATCH_NULL; } IGeneratorsForTest& getGeneratorsForCurrentTest() { IGeneratorsForTest* generators = findGeneratorsForCurrentTest(); if( !generators ) { std::string testName = getResultCapture()->getCurrentTestName(); generators = createGeneratorsForTest(); m_generatorsByTestName.insert( std::make_pair( testName, generators ) ); } return *generators; } private: Ptr m_config; IRunner* m_runner; IResultCapture* m_resultCapture; std::map m_generatorsByTestName; }; namespace { Context* currentContext = CATCH_NULL; } IMutableContext& getCurrentMutableContext() { if( !currentContext ) currentContext = new Context(); return *currentContext; } IContext& getCurrentContext() { return getCurrentMutableContext(); } void cleanUpContext() { delete currentContext; currentContext = CATCH_NULL; } } // #included from: catch_console_colour_impl.hpp #define TWOBLUECUBES_CATCH_CONSOLE_COLOUR_IMPL_HPP_INCLUDED // #included from: catch_errno_guard.hpp #define TWOBLUECUBES_CATCH_ERRNO_GUARD_HPP_INCLUDED #include namespace Catch { class ErrnoGuard { public: ErrnoGuard():m_oldErrno(errno){} ~ErrnoGuard() { errno = m_oldErrno; } private: int m_oldErrno; }; } namespace Catch { namespace { struct IColourImpl { 
virtual ~IColourImpl() {} virtual void use( Colour::Code _colourCode ) = 0; }; struct NoColourImpl : IColourImpl { void use( Colour::Code ) {} static IColourImpl* instance() { static NoColourImpl s_instance; return &s_instance; } }; } // anon namespace } // namespace Catch #if !defined( CATCH_CONFIG_COLOUR_NONE ) && !defined( CATCH_CONFIG_COLOUR_WINDOWS ) && !defined( CATCH_CONFIG_COLOUR_ANSI ) # ifdef CATCH_PLATFORM_WINDOWS # define CATCH_CONFIG_COLOUR_WINDOWS # else # define CATCH_CONFIG_COLOUR_ANSI # endif #endif #if defined ( CATCH_CONFIG_COLOUR_WINDOWS ) ///////////////////////////////////////// namespace Catch { namespace { class Win32ColourImpl : public IColourImpl { public: Win32ColourImpl() : stdoutHandle( GetStdHandle(STD_OUTPUT_HANDLE) ) { CONSOLE_SCREEN_BUFFER_INFO csbiInfo; GetConsoleScreenBufferInfo( stdoutHandle, &csbiInfo ); originalForegroundAttributes = csbiInfo.wAttributes & ~( BACKGROUND_GREEN | BACKGROUND_RED | BACKGROUND_BLUE | BACKGROUND_INTENSITY ); originalBackgroundAttributes = csbiInfo.wAttributes & ~( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE | FOREGROUND_INTENSITY ); } virtual void use( Colour::Code _colourCode ) { switch( _colourCode ) { case Colour::None: return setTextAttribute( originalForegroundAttributes ); case Colour::White: return setTextAttribute( FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); case Colour::Red: return setTextAttribute( FOREGROUND_RED ); case Colour::Green: return setTextAttribute( FOREGROUND_GREEN ); case Colour::Blue: return setTextAttribute( FOREGROUND_BLUE ); case Colour::Cyan: return setTextAttribute( FOREGROUND_BLUE | FOREGROUND_GREEN ); case Colour::Yellow: return setTextAttribute( FOREGROUND_RED | FOREGROUND_GREEN ); case Colour::Grey: return setTextAttribute( 0 ); case Colour::LightGrey: return setTextAttribute( FOREGROUND_INTENSITY ); case Colour::BrightRed: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_RED ); case Colour::BrightGreen: return setTextAttribute( 
FOREGROUND_INTENSITY | FOREGROUND_GREEN ); case Colour::BrightWhite: return setTextAttribute( FOREGROUND_INTENSITY | FOREGROUND_GREEN | FOREGROUND_RED | FOREGROUND_BLUE ); case Colour::Bright: throw std::logic_error( "not a colour" ); } } private: void setTextAttribute( WORD _textAttribute ) { SetConsoleTextAttribute( stdoutHandle, _textAttribute | originalBackgroundAttributes ); } HANDLE stdoutHandle; WORD originalForegroundAttributes; WORD originalBackgroundAttributes; }; IColourImpl* platformColourInstance() { static Win32ColourImpl s_instance; Ptr config = getCurrentContext().getConfig(); UseColour::YesOrNo colourMode = config ? config->useColour() : UseColour::Auto; if( colourMode == UseColour::Auto ) colourMode = !isDebuggerActive() ? UseColour::Yes : UseColour::No; return colourMode == UseColour::Yes ? &s_instance : NoColourImpl::instance(); } } // end anon namespace } // end namespace Catch #elif defined( CATCH_CONFIG_COLOUR_ANSI ) ////////////////////////////////////// #include namespace Catch { namespace { // use POSIX/ ANSI console terminal codes // Thanks to Adam Strzelecki for original contribution // (http://github.com/nanoant) // https://github.com/philsquared/Catch/pull/131 class PosixColourImpl : public IColourImpl { public: virtual void use( Colour::Code _colourCode ) { switch( _colourCode ) { case Colour::None: case Colour::White: return setColour( "[0m" ); case Colour::Red: return setColour( "[0;31m" ); case Colour::Green: return setColour( "[0;32m" ); case Colour::Blue: return setColour( "[0;34m" ); case Colour::Cyan: return setColour( "[0;36m" ); case Colour::Yellow: return setColour( "[0;33m" ); case Colour::Grey: return setColour( "[1;30m" ); case Colour::LightGrey: return setColour( "[0;37m" ); case Colour::BrightRed: return setColour( "[1;31m" ); case Colour::BrightGreen: return setColour( "[1;32m" ); case Colour::BrightWhite: return setColour( "[1;37m" ); case Colour::Bright: throw std::logic_error( "not a colour" ); } } static 
IColourImpl* instance() { static PosixColourImpl s_instance; return &s_instance; } private: void setColour( const char* _escapeCode ) { Catch::cout() << '\033' << _escapeCode; } }; IColourImpl* platformColourInstance() { ErrnoGuard guard; Ptr config = getCurrentContext().getConfig(); UseColour::YesOrNo colourMode = config ? config->useColour() : UseColour::Auto; if( colourMode == UseColour::Auto ) colourMode = (!isDebuggerActive() && isatty(STDOUT_FILENO) ) ? UseColour::Yes : UseColour::No; return colourMode == UseColour::Yes ? PosixColourImpl::instance() : NoColourImpl::instance(); } } // end anon namespace } // end namespace Catch #else // not Windows or ANSI /////////////////////////////////////////////// namespace Catch { static IColourImpl* platformColourInstance() { return NoColourImpl::instance(); } } // end namespace Catch #endif // Windows/ ANSI/ None namespace Catch { Colour::Colour( Code _colourCode ) : m_moved( false ) { use( _colourCode ); } Colour::Colour( Colour const& _other ) : m_moved( false ) { const_cast( _other ).m_moved = true; } Colour::~Colour(){ if( !m_moved ) use( None ); } void Colour::use( Code _colourCode ) { static IColourImpl* impl = platformColourInstance(); impl->use( _colourCode ); } } // end namespace Catch // #included from: catch_generators_impl.hpp #define TWOBLUECUBES_CATCH_GENERATORS_IMPL_HPP_INCLUDED #include #include #include namespace Catch { struct GeneratorInfo : IGeneratorInfo { GeneratorInfo( std::size_t size ) : m_size( size ), m_currentIndex( 0 ) {} bool moveNext() { if( ++m_currentIndex == m_size ) { m_currentIndex = 0; return false; } return true; } std::size_t getCurrentIndex() const { return m_currentIndex; } std::size_t m_size; std::size_t m_currentIndex; }; /////////////////////////////////////////////////////////////////////////// class GeneratorsForTest : public IGeneratorsForTest { public: ~GeneratorsForTest() { deleteAll( m_generatorsInOrder ); } IGeneratorInfo& getGeneratorInfo( std::string const& 
fileInfo, std::size_t size ) { std::map::const_iterator it = m_generatorsByName.find( fileInfo ); if( it == m_generatorsByName.end() ) { IGeneratorInfo* info = new GeneratorInfo( size ); m_generatorsByName.insert( std::make_pair( fileInfo, info ) ); m_generatorsInOrder.push_back( info ); return *info; } return *it->second; } bool moveNext() { std::vector::const_iterator it = m_generatorsInOrder.begin(); std::vector::const_iterator itEnd = m_generatorsInOrder.end(); for(; it != itEnd; ++it ) { if( (*it)->moveNext() ) return true; } return false; } private: std::map m_generatorsByName; std::vector m_generatorsInOrder; }; IGeneratorsForTest* createGeneratorsForTest() { return new GeneratorsForTest(); } } // end namespace Catch // #included from: catch_assertionresult.hpp #define TWOBLUECUBES_CATCH_ASSERTIONRESULT_HPP_INCLUDED namespace Catch { AssertionInfo::AssertionInfo( char const * _macroName, SourceLineInfo const& _lineInfo, char const * _capturedExpression, ResultDisposition::Flags _resultDisposition, char const * _secondArg) : macroName( _macroName ), lineInfo( _lineInfo ), capturedExpression( _capturedExpression ), resultDisposition( _resultDisposition ), secondArg( _secondArg ) {} AssertionResult::AssertionResult() {} AssertionResult::AssertionResult( AssertionInfo const& info, AssertionResultData const& data ) : m_info( info ), m_resultData( data ) {} AssertionResult::~AssertionResult() {} // Result was a success bool AssertionResult::succeeded() const { return Catch::isOk( m_resultData.resultType ); } // Result was a success, or failure is suppressed bool AssertionResult::isOk() const { return Catch::isOk( m_resultData.resultType ) || shouldSuppressFailure( m_info.resultDisposition ); } ResultWas::OfType AssertionResult::getResultType() const { return m_resultData.resultType; } bool AssertionResult::hasExpression() const { return m_info.capturedExpression[0] != 0; } bool AssertionResult::hasMessage() const { return !m_resultData.message.empty(); } 
std::string capturedExpressionWithSecondArgument( char const * capturedExpression, char const * secondArg ) { return (secondArg[0] == 0 || secondArg[0] == '"' && secondArg[1] == '"') ? capturedExpression : std::string(capturedExpression) + ", " + secondArg; } std::string AssertionResult::getExpression() const { if( isFalseTest( m_info.resultDisposition ) ) return '!' + capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); else return capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); } std::string AssertionResult::getExpressionInMacro() const { if( m_info.macroName[0] == 0 ) return capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg); else return std::string(m_info.macroName) + "( " + capturedExpressionWithSecondArgument(m_info.capturedExpression, m_info.secondArg) + " )"; } bool AssertionResult::hasExpandedExpression() const { return hasExpression() && getExpandedExpression() != getExpression(); } std::string AssertionResult::getExpandedExpression() const { return m_resultData.reconstructExpression(); } std::string AssertionResult::getMessage() const { return m_resultData.message; } SourceLineInfo AssertionResult::getSourceInfo() const { return m_info.lineInfo; } std::string AssertionResult::getTestMacroName() const { return m_info.macroName; } void AssertionResult::discardDecomposedExpression() const { m_resultData.decomposedExpression = CATCH_NULL; } void AssertionResult::expandDecomposedExpression() const { m_resultData.reconstructExpression(); } } // end namespace Catch // #included from: catch_test_case_info.hpp #define TWOBLUECUBES_CATCH_TEST_CASE_INFO_HPP_INCLUDED #include namespace Catch { inline TestCaseInfo::SpecialProperties parseSpecialTag( std::string const& tag ) { if( startsWith( tag, '.' 
) || tag == "hide" || tag == "!hide" ) return TestCaseInfo::IsHidden; else if( tag == "!throws" ) return TestCaseInfo::Throws; else if( tag == "!shouldfail" ) return TestCaseInfo::ShouldFail; else if( tag == "!mayfail" ) return TestCaseInfo::MayFail; else if( tag == "!nonportable" ) return TestCaseInfo::NonPortable; else return TestCaseInfo::None; } inline bool isReservedTag( std::string const& tag ) { return parseSpecialTag( tag ) == TestCaseInfo::None && tag.size() > 0 && !std::isalnum( tag[0] ); } inline void enforceNotReservedTag( std::string const& tag, SourceLineInfo const& _lineInfo ) { if( isReservedTag( tag ) ) { std::ostringstream ss; ss << Colour(Colour::Red) << "Tag name [" << tag << "] not allowed.\n" << "Tag names starting with non alpha-numeric characters are reserved\n" << Colour(Colour::FileName) << _lineInfo << '\n'; throw std::runtime_error(ss.str()); } } TestCase makeTestCase( ITestCase* _testCase, std::string const& _className, std::string const& _name, std::string const& _descOrTags, SourceLineInfo const& _lineInfo ) { bool isHidden( startsWith( _name, "./" ) ); // Legacy support // Parse out tags std::set tags; std::string desc, tag; bool inTag = false; for( std::size_t i = 0; i < _descOrTags.size(); ++i ) { char c = _descOrTags[i]; if( !inTag ) { if( c == '[' ) inTag = true; else desc += c; } else { if( c == ']' ) { TestCaseInfo::SpecialProperties prop = parseSpecialTag( tag ); if( prop == TestCaseInfo::IsHidden ) isHidden = true; else if( prop == TestCaseInfo::None ) enforceNotReservedTag( tag, _lineInfo ); tags.insert( tag ); tag.clear(); inTag = false; } else tag += c; } } if( isHidden ) { tags.insert( "hide" ); tags.insert( "." 
); } TestCaseInfo info( _name, _className, desc, tags, _lineInfo ); return TestCase( _testCase, info ); } void setTags( TestCaseInfo& testCaseInfo, std::set const& tags ) { testCaseInfo.tags = tags; testCaseInfo.lcaseTags.clear(); std::ostringstream oss; for( std::set::const_iterator it = tags.begin(), itEnd = tags.end(); it != itEnd; ++it ) { oss << '[' << *it << ']'; std::string lcaseTag = toLower( *it ); testCaseInfo.properties = static_cast( testCaseInfo.properties | parseSpecialTag( lcaseTag ) ); testCaseInfo.lcaseTags.insert( lcaseTag ); } testCaseInfo.tagsAsString = oss.str(); } TestCaseInfo::TestCaseInfo( std::string const& _name, std::string const& _className, std::string const& _description, std::set const& _tags, SourceLineInfo const& _lineInfo ) : name( _name ), className( _className ), description( _description ), lineInfo( _lineInfo ), properties( None ) { setTags( *this, _tags ); } TestCaseInfo::TestCaseInfo( TestCaseInfo const& other ) : name( other.name ), className( other.className ), description( other.description ), tags( other.tags ), lcaseTags( other.lcaseTags ), tagsAsString( other.tagsAsString ), lineInfo( other.lineInfo ), properties( other.properties ) {} bool TestCaseInfo::isHidden() const { return ( properties & IsHidden ) != 0; } bool TestCaseInfo::throws() const { return ( properties & Throws ) != 0; } bool TestCaseInfo::okToFail() const { return ( properties & (ShouldFail | MayFail ) ) != 0; } bool TestCaseInfo::expectedToFail() const { return ( properties & (ShouldFail ) ) != 0; } TestCase::TestCase( ITestCase* testCase, TestCaseInfo const& info ) : TestCaseInfo( info ), test( testCase ) {} TestCase::TestCase( TestCase const& other ) : TestCaseInfo( other ), test( other.test ) {} TestCase TestCase::withName( std::string const& _newName ) const { TestCase other( *this ); other.name = _newName; return other; } void TestCase::swap( TestCase& other ) { test.swap( other.test ); name.swap( other.name ); className.swap( other.className ); 
description.swap( other.description ); tags.swap( other.tags ); lcaseTags.swap( other.lcaseTags ); tagsAsString.swap( other.tagsAsString ); std::swap( TestCaseInfo::properties, static_cast( other ).properties ); std::swap( lineInfo, other.lineInfo ); } void TestCase::invoke() const { test->invoke(); } bool TestCase::operator == ( TestCase const& other ) const { return test.get() == other.test.get() && name == other.name && className == other.className; } bool TestCase::operator < ( TestCase const& other ) const { return name < other.name; } TestCase& TestCase::operator = ( TestCase const& other ) { TestCase temp( other ); swap( temp ); return *this; } TestCaseInfo const& TestCase::getTestCaseInfo() const { return *this; } } // end namespace Catch // #included from: catch_version.hpp #define TWOBLUECUBES_CATCH_VERSION_HPP_INCLUDED namespace Catch { Version::Version ( unsigned int _majorVersion, unsigned int _minorVersion, unsigned int _patchNumber, char const * const _branchName, unsigned int _buildNumber ) : majorVersion( _majorVersion ), minorVersion( _minorVersion ), patchNumber( _patchNumber ), branchName( _branchName ), buildNumber( _buildNumber ) {} std::ostream& operator << ( std::ostream& os, Version const& version ) { os << version.majorVersion << '.' << version.minorVersion << '.' << version.patchNumber; // branchName is never null -> 0th char is \0 if it is empty if (version.branchName[0]) { os << '-' << version.branchName << '.' 
<< version.buildNumber; } return os; } inline Version libraryVersion() { static Version version( 1, 9, 6, "", 0 ); return version; } } // #included from: catch_message.hpp #define TWOBLUECUBES_CATCH_MESSAGE_HPP_INCLUDED namespace Catch { MessageInfo::MessageInfo( std::string const& _macroName, SourceLineInfo const& _lineInfo, ResultWas::OfType _type ) : macroName( _macroName ), lineInfo( _lineInfo ), type( _type ), sequence( ++globalCount ) {} // This may need protecting if threading support is added unsigned int MessageInfo::globalCount = 0; //////////////////////////////////////////////////////////////////////////// ScopedMessage::ScopedMessage( MessageBuilder const& builder ) : m_info( builder.m_info ) { m_info.message = builder.m_stream.str(); getResultCapture().pushScopedMessage( m_info ); } ScopedMessage::ScopedMessage( ScopedMessage const& other ) : m_info( other.m_info ) {} ScopedMessage::~ScopedMessage() { if ( !HasUncaughtException() ){ getResultCapture().popScopedMessage(m_info); } } } // end namespace Catch // #included from: catch_legacy_reporter_adapter.hpp #define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_HPP_INCLUDED // #included from: catch_legacy_reporter_adapter.h #define TWOBLUECUBES_CATCH_LEGACY_REPORTER_ADAPTER_H_INCLUDED namespace Catch { // Deprecated struct IReporter : IShared { virtual ~IReporter(); virtual bool shouldRedirectStdout() const = 0; virtual void StartTesting() = 0; virtual void EndTesting( Totals const& totals ) = 0; virtual void StartGroup( std::string const& groupName ) = 0; virtual void EndGroup( std::string const& groupName, Totals const& totals ) = 0; virtual void StartTestCase( TestCaseInfo const& testInfo ) = 0; virtual void EndTestCase( TestCaseInfo const& testInfo, Totals const& totals, std::string const& stdOut, std::string const& stdErr ) = 0; virtual void StartSection( std::string const& sectionName, std::string const& description ) = 0; virtual void EndSection( std::string const& sectionName, Counts const& 
assertions ) = 0; virtual void NoAssertionsInSection( std::string const& sectionName ) = 0; virtual void NoAssertionsInTestCase( std::string const& testName ) = 0; virtual void Aborted() = 0; virtual void Result( AssertionResult const& result ) = 0; }; class LegacyReporterAdapter : public SharedImpl { public: LegacyReporterAdapter( Ptr const& legacyReporter ); virtual ~LegacyReporterAdapter(); virtual ReporterPreferences getPreferences() const; virtual void noMatchingTestCases( std::string const& ); virtual void testRunStarting( TestRunInfo const& ); virtual void testGroupStarting( GroupInfo const& groupInfo ); virtual void testCaseStarting( TestCaseInfo const& testInfo ); virtual void sectionStarting( SectionInfo const& sectionInfo ); virtual void assertionStarting( AssertionInfo const& ); virtual bool assertionEnded( AssertionStats const& assertionStats ); virtual void sectionEnded( SectionStats const& sectionStats ); virtual void testCaseEnded( TestCaseStats const& testCaseStats ); virtual void testGroupEnded( TestGroupStats const& testGroupStats ); virtual void testRunEnded( TestRunStats const& testRunStats ); virtual void skipTest( TestCaseInfo const& ); private: Ptr m_legacyReporter; }; } namespace Catch { LegacyReporterAdapter::LegacyReporterAdapter( Ptr const& legacyReporter ) : m_legacyReporter( legacyReporter ) {} LegacyReporterAdapter::~LegacyReporterAdapter() {} ReporterPreferences LegacyReporterAdapter::getPreferences() const { ReporterPreferences prefs; prefs.shouldRedirectStdOut = m_legacyReporter->shouldRedirectStdout(); return prefs; } void LegacyReporterAdapter::noMatchingTestCases( std::string const& ) {} void LegacyReporterAdapter::testRunStarting( TestRunInfo const& ) { m_legacyReporter->StartTesting(); } void LegacyReporterAdapter::testGroupStarting( GroupInfo const& groupInfo ) { m_legacyReporter->StartGroup( groupInfo.name ); } void LegacyReporterAdapter::testCaseStarting( TestCaseInfo const& testInfo ) { m_legacyReporter->StartTestCase( 
testInfo ); } void LegacyReporterAdapter::sectionStarting( SectionInfo const& sectionInfo ) { m_legacyReporter->StartSection( sectionInfo.name, sectionInfo.description ); } void LegacyReporterAdapter::assertionStarting( AssertionInfo const& ) { // Not on legacy interface } bool LegacyReporterAdapter::assertionEnded( AssertionStats const& assertionStats ) { if( assertionStats.assertionResult.getResultType() != ResultWas::Ok ) { for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); it != itEnd; ++it ) { if( it->type == ResultWas::Info ) { ResultBuilder rb( it->macroName.c_str(), it->lineInfo, "", ResultDisposition::Normal ); rb << it->message; rb.setResultType( ResultWas::Info ); AssertionResult result = rb.build(); m_legacyReporter->Result( result ); } } } m_legacyReporter->Result( assertionStats.assertionResult ); return true; } void LegacyReporterAdapter::sectionEnded( SectionStats const& sectionStats ) { if( sectionStats.missingAssertions ) m_legacyReporter->NoAssertionsInSection( sectionStats.sectionInfo.name ); m_legacyReporter->EndSection( sectionStats.sectionInfo.name, sectionStats.assertions ); } void LegacyReporterAdapter::testCaseEnded( TestCaseStats const& testCaseStats ) { m_legacyReporter->EndTestCase ( testCaseStats.testInfo, testCaseStats.totals, testCaseStats.stdOut, testCaseStats.stdErr ); } void LegacyReporterAdapter::testGroupEnded( TestGroupStats const& testGroupStats ) { if( testGroupStats.aborting ) m_legacyReporter->Aborted(); m_legacyReporter->EndGroup( testGroupStats.groupInfo.name, testGroupStats.totals ); } void LegacyReporterAdapter::testRunEnded( TestRunStats const& testRunStats ) { m_legacyReporter->EndTesting( testRunStats.totals ); } void LegacyReporterAdapter::skipTest( TestCaseInfo const& ) { } } // #included from: catch_timer.hpp #ifdef __clang__ # pragma clang diagnostic push # pragma clang diagnostic ignored "-Wc++11-long-long" #endif #ifdef CATCH_PLATFORM_WINDOWS 
#else #include #endif namespace Catch { namespace { #ifdef CATCH_PLATFORM_WINDOWS UInt64 getCurrentTicks() { static UInt64 hz=0, hzo=0; if (!hz) { QueryPerformanceFrequency( reinterpret_cast( &hz ) ); QueryPerformanceCounter( reinterpret_cast( &hzo ) ); } UInt64 t; QueryPerformanceCounter( reinterpret_cast( &t ) ); return ((t-hzo)*1000000)/hz; } #else UInt64 getCurrentTicks() { timeval t; gettimeofday(&t,CATCH_NULL); return static_cast( t.tv_sec ) * 1000000ull + static_cast( t.tv_usec ); } #endif } void Timer::start() { m_ticks = getCurrentTicks(); } unsigned int Timer::getElapsedMicroseconds() const { return static_cast(getCurrentTicks() - m_ticks); } unsigned int Timer::getElapsedMilliseconds() const { return static_cast(getElapsedMicroseconds()/1000); } double Timer::getElapsedSeconds() const { return getElapsedMicroseconds()/1000000.0; } } // namespace Catch #ifdef __clang__ # pragma clang diagnostic pop #endif // #included from: catch_common.hpp #define TWOBLUECUBES_CATCH_COMMON_HPP_INCLUDED #include #include namespace Catch { bool startsWith( std::string const& s, std::string const& prefix ) { return s.size() >= prefix.size() && std::equal(prefix.begin(), prefix.end(), s.begin()); } bool startsWith( std::string const& s, char prefix ) { return !s.empty() && s[0] == prefix; } bool endsWith( std::string const& s, std::string const& suffix ) { return s.size() >= suffix.size() && std::equal(suffix.rbegin(), suffix.rend(), s.rbegin()); } bool endsWith( std::string const& s, char suffix ) { return !s.empty() && s[s.size()-1] == suffix; } bool contains( std::string const& s, std::string const& infix ) { return s.find( infix ) != std::string::npos; } char toLowerCh(char c) { return static_cast( std::tolower( c ) ); } void toLowerInPlace( std::string& s ) { std::transform( s.begin(), s.end(), s.begin(), toLowerCh ); } std::string toLower( std::string const& s ) { std::string lc = s; toLowerInPlace( lc ); return lc; } std::string trim( std::string const& str ) { static 
char const* whitespaceChars = "\n\r\t "; std::string::size_type start = str.find_first_not_of( whitespaceChars ); std::string::size_type end = str.find_last_not_of( whitespaceChars ); return start != std::string::npos ? str.substr( start, 1+end-start ) : std::string(); } bool replaceInPlace( std::string& str, std::string const& replaceThis, std::string const& withThis ) { bool replaced = false; std::size_t i = str.find( replaceThis ); while( i != std::string::npos ) { replaced = true; str = str.substr( 0, i ) + withThis + str.substr( i+replaceThis.size() ); if( i < str.size()-withThis.size() ) i = str.find( replaceThis, i+withThis.size() ); else i = std::string::npos; } return replaced; } pluralise::pluralise( std::size_t count, std::string const& label ) : m_count( count ), m_label( label ) {} std::ostream& operator << ( std::ostream& os, pluralise const& pluraliser ) { os << pluraliser.m_count << ' ' << pluraliser.m_label; if( pluraliser.m_count != 1 ) os << 's'; return os; } SourceLineInfo::SourceLineInfo() : file(""), line( 0 ){} SourceLineInfo::SourceLineInfo( char const* _file, std::size_t _line ) : file( _file ), line( _line ) {} bool SourceLineInfo::empty() const { return file[0] == '\0'; } bool SourceLineInfo::operator == ( SourceLineInfo const& other ) const { return line == other.line && (file == other.file || std::strcmp(file, other.file) == 0); } bool SourceLineInfo::operator < ( SourceLineInfo const& other ) const { return line < other.line || ( line == other.line && (std::strcmp(file, other.file) < 0)); } void seedRng( IConfig const& config ) { if( config.rngSeed() != 0 ) srand( config.rngSeed() ); } unsigned int rngSeed() { return getCurrentContext().getConfig()->rngSeed(); } std::ostream& operator << ( std::ostream& os, SourceLineInfo const& info ) { #ifndef __GNUG__ os << info.file << '(' << info.line << ')'; #else os << info.file << ':' << info.line; #endif return os; } void throwLogicError( std::string const& message, SourceLineInfo const& 
locationInfo ) { std::ostringstream oss; oss << locationInfo << ": Internal Catch error: '" << message << '\''; if( alwaysTrue() ) throw std::logic_error( oss.str() ); } } // #included from: catch_section.hpp #define TWOBLUECUBES_CATCH_SECTION_HPP_INCLUDED namespace Catch { SectionInfo::SectionInfo ( SourceLineInfo const& _lineInfo, std::string const& _name, std::string const& _description ) : name( _name ), description( _description ), lineInfo( _lineInfo ) {} Section::Section( SectionInfo const& info ) : m_info( info ), m_sectionIncluded( getResultCapture().sectionStarted( m_info, m_assertions ) ) { m_timer.start(); } #if defined(_MSC_VER) # pragma warning(push) # pragma warning(disable:4996) // std::uncaught_exception is deprecated in C++17 #endif Section::~Section() { if( m_sectionIncluded ) { SectionEndInfo endInfo( m_info, m_assertions, m_timer.getElapsedSeconds() ); if( HasUncaughtException() ) getResultCapture().sectionEndedEarly( endInfo ); else getResultCapture().sectionEnded( endInfo ); } } #if defined(_MSC_VER) # pragma warning(pop) #endif // This indicates whether the section should be executed or not Section::operator bool() const { return m_sectionIncluded; } } // end namespace Catch // #included from: catch_debugger.hpp #define TWOBLUECUBES_CATCH_DEBUGGER_HPP_INCLUDED #ifdef CATCH_PLATFORM_MAC #include #include #include #include #include namespace Catch{ // The following function is taken directly from the following technical note: // http://developer.apple.com/library/mac/#qa/qa2004/qa1361.html // Returns true if the current process is being debugged (either // running under the debugger or has a debugger attached post facto). bool isDebuggerActive(){ int mib[4]; struct kinfo_proc info; size_t size; // Initialize the flags so that, if sysctl fails for some bizarre // reason, we get a predictable result. 
info.kp_proc.p_flag = 0; // Initialize mib, which tells sysctl the info we want, in this case // we're looking for information about a specific process ID. mib[0] = CTL_KERN; mib[1] = KERN_PROC; mib[2] = KERN_PROC_PID; mib[3] = getpid(); // Call sysctl. size = sizeof(info); if( sysctl(mib, sizeof(mib) / sizeof(*mib), &info, &size, CATCH_NULL, 0) != 0 ) { Catch::cerr() << "\n** Call to sysctl failed - unable to determine if debugger is active **\n" << std::endl; return false; } // We're being debugged if the P_TRACED flag is set. return ( (info.kp_proc.p_flag & P_TRACED) != 0 ); } } // namespace Catch #elif defined(CATCH_PLATFORM_LINUX) #include #include namespace Catch{ // The standard POSIX way of detecting a debugger is to attempt to // ptrace() the process, but this needs to be done from a child and not // this process itself to still allow attaching to this process later // if wanted, so is rather heavy. Under Linux we have the PID of the // "debugger" (which doesn't need to be gdb, of course, it could also // be strace, for example) in /proc/$PID/status, so just get it from // there instead. bool isDebuggerActive(){ // Libstdc++ has a bug, where std::ifstream sets errno to 0 // This way our users can properly assert over errno values ErrnoGuard guard; std::ifstream in("/proc/self/status"); for( std::string line; std::getline(in, line); ) { static const int PREFIX_LEN = 11; if( line.compare(0, PREFIX_LEN, "TracerPid:\t") == 0 ) { // We're traced if the PID is not 0 and no other PID starts // with 0 digit, so it's enough to check for just a single // character. 
return line.length() > PREFIX_LEN && line[PREFIX_LEN] != '0'; } } return false; } } // namespace Catch #elif defined(_MSC_VER) extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); namespace Catch { bool isDebuggerActive() { return IsDebuggerPresent() != 0; } } #elif defined(__MINGW32__) extern "C" __declspec(dllimport) int __stdcall IsDebuggerPresent(); namespace Catch { bool isDebuggerActive() { return IsDebuggerPresent() != 0; } } #else namespace Catch { inline bool isDebuggerActive() { return false; } } #endif // Platform #ifdef CATCH_PLATFORM_WINDOWS namespace Catch { void writeToDebugConsole( std::string const& text ) { ::OutputDebugStringA( text.c_str() ); } } #else namespace Catch { void writeToDebugConsole( std::string const& text ) { // !TBD: Need a version for Mac/ XCode and other IDEs Catch::cout() << text; } } #endif // Platform // #included from: catch_tostring.hpp #define TWOBLUECUBES_CATCH_TOSTRING_HPP_INCLUDED namespace Catch { namespace Detail { const std::string unprintableString = "{?}"; namespace { const int hexThreshold = 255; struct Endianness { enum Arch { Big, Little }; static Arch which() { union _{ int asInt; char asChar[sizeof (int)]; } u; u.asInt = 1; return ( u.asChar[sizeof(int)-1] == 1 ) ? 
Big : Little; } }; } std::string rawMemoryToString( const void *object, std::size_t size ) { // Reverse order for little endian architectures int i = 0, end = static_cast( size ), inc = 1; if( Endianness::which() == Endianness::Little ) { i = end-1; end = inc = -1; } unsigned char const *bytes = static_cast(object); std::ostringstream os; os << "0x" << std::setfill('0') << std::hex; for( ; i != end; i += inc ) os << std::setw(2) << static_cast(bytes[i]); return os.str(); } } std::string toString( std::string const& value ) { std::string s = value; if( getCurrentContext().getConfig()->showInvisibles() ) { for(size_t i = 0; i < s.size(); ++i ) { std::string subs; switch( s[i] ) { case '\n': subs = "\\n"; break; case '\t': subs = "\\t"; break; default: break; } if( !subs.empty() ) { s = s.substr( 0, i ) + subs + s.substr( i+1 ); ++i; } } } return '"' + s + '"'; } std::string toString( std::wstring const& value ) { std::string s; s.reserve( value.size() ); for(size_t i = 0; i < value.size(); ++i ) s += value[i] <= 0xff ? static_cast( value[i] ) : '?'; return Catch::toString( s ); } std::string toString( const char* const value ) { return value ? Catch::toString( std::string( value ) ) : std::string( "{null string}" ); } std::string toString( char* const value ) { return Catch::toString( static_cast( value ) ); } std::string toString( const wchar_t* const value ) { return value ? 
Catch::toString( std::wstring(value) ) : std::string( "{null string}" ); } std::string toString( wchar_t* const value ) { return Catch::toString( static_cast( value ) ); } std::string toString( int value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned int value ) { return Catch::toString( static_cast( value ) ); } template std::string fpToString( T value, int precision ) { std::ostringstream oss; oss << std::setprecision( precision ) << std::fixed << value; std::string d = oss.str(); std::size_t i = d.find_last_not_of( '0' ); if( i != std::string::npos && i != d.size()-1 ) { if( d[i] == '.' ) i++; d = d.substr( 0, i+1 ); } return d; } std::string toString( const double value ) { return fpToString( value, 10 ); } std::string toString( const float value ) { return fpToString( value, 5 ) + 'f'; } std::string toString( bool value ) { return value ? 
"true" : "false"; } std::string toString( char value ) { if ( value == '\r' ) return "'\\r'"; if ( value == '\f' ) return "'\\f'"; if ( value == '\n' ) return "'\\n'"; if ( value == '\t' ) return "'\\t'"; if ( '\0' <= value && value < ' ' ) return toString( static_cast( value ) ); char chstr[] = "' '"; chstr[1] = value; return chstr; } std::string toString( signed char value ) { return toString( static_cast( value ) ); } std::string toString( unsigned char value ) { return toString( static_cast( value ) ); } #ifdef CATCH_CONFIG_CPP11_LONG_LONG std::string toString( long long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } std::string toString( unsigned long long value ) { std::ostringstream oss; oss << value; if( value > Detail::hexThreshold ) oss << " (0x" << std::hex << value << ')'; return oss.str(); } #endif #ifdef CATCH_CONFIG_CPP11_NULLPTR std::string toString( std::nullptr_t ) { return "nullptr"; } #endif #ifdef __OBJC__ std::string toString( NSString const * const& nsstring ) { if( !nsstring ) return "nil"; return "@" + toString([nsstring UTF8String]); } std::string toString( NSString * CATCH_ARC_STRONG & nsstring ) { if( !nsstring ) return "nil"; return "@" + toString([nsstring UTF8String]); } std::string toString( NSObject* const& nsObject ) { return toString( [nsObject description] ); } #endif } // end namespace Catch // #included from: catch_result_builder.hpp #define TWOBLUECUBES_CATCH_RESULT_BUILDER_HPP_INCLUDED namespace Catch { ResultBuilder::ResultBuilder( char const* macroName, SourceLineInfo const& lineInfo, char const* capturedExpression, ResultDisposition::Flags resultDisposition, char const* secondArg ) : m_assertionInfo( macroName, lineInfo, capturedExpression, resultDisposition, secondArg ), m_shouldDebugBreak( false ), m_shouldThrow( false ), m_guardException( false ) { m_stream().oss.str(""); } ResultBuilder::~ResultBuilder() { #if 
defined(CATCH_CONFIG_FAST_COMPILE) if ( m_guardException ) { m_stream().oss << "Exception translation was disabled by CATCH_CONFIG_FAST_COMPILE"; captureResult( ResultWas::ThrewException ); getCurrentContext().getResultCapture()->exceptionEarlyReported(); } #endif } ResultBuilder& ResultBuilder::setResultType( ResultWas::OfType result ) { m_data.resultType = result; return *this; } ResultBuilder& ResultBuilder::setResultType( bool result ) { m_data.resultType = result ? ResultWas::Ok : ResultWas::ExpressionFailed; return *this; } void ResultBuilder::endExpression( DecomposedExpression const& expr ) { AssertionResult result = build( expr ); handleResult( result ); } void ResultBuilder::useActiveException( ResultDisposition::Flags resultDisposition ) { m_assertionInfo.resultDisposition = resultDisposition; m_stream().oss << Catch::translateActiveException(); captureResult( ResultWas::ThrewException ); } void ResultBuilder::captureResult( ResultWas::OfType resultType ) { setResultType( resultType ); captureExpression(); } void ResultBuilder::captureExpectedException( std::string const& expectedMessage ) { if( expectedMessage.empty() ) captureExpectedException( Matchers::Impl::MatchAllOf() ); else captureExpectedException( Matchers::Equals( expectedMessage ) ); } void ResultBuilder::captureExpectedException( Matchers::Impl::MatcherBase const& matcher ) { assert( !isFalseTest( m_assertionInfo.resultDisposition ) ); AssertionResultData data = m_data; data.resultType = ResultWas::Ok; data.reconstructedExpression = capturedExpressionWithSecondArgument(m_assertionInfo.capturedExpression, m_assertionInfo.secondArg); std::string actualMessage = Catch::translateActiveException(); if( !matcher.match( actualMessage ) ) { data.resultType = ResultWas::ExpressionFailed; data.reconstructedExpression = actualMessage; } AssertionResult result( m_assertionInfo, data ); handleResult( result ); } void ResultBuilder::captureExpression() { AssertionResult result = build(); handleResult( 
result ); } void ResultBuilder::handleResult( AssertionResult const& result ) { getResultCapture().assertionEnded( result ); if( !result.isOk() ) { if( getCurrentContext().getConfig()->shouldDebugBreak() ) m_shouldDebugBreak = true; if( getCurrentContext().getRunner()->aborting() || (m_assertionInfo.resultDisposition & ResultDisposition::Normal) ) m_shouldThrow = true; } } void ResultBuilder::react() { #if defined(CATCH_CONFIG_FAST_COMPILE) if (m_shouldDebugBreak) { /////////////////////////////////////////////////////////////////// // To inspect the state during test, you need to go one level up the callstack // To go back to the test and change execution, jump over the throw statement /////////////////////////////////////////////////////////////////// CATCH_BREAK_INTO_DEBUGGER(); } #endif if( m_shouldThrow ) throw Catch::TestFailureException(); } bool ResultBuilder::shouldDebugBreak() const { return m_shouldDebugBreak; } bool ResultBuilder::allowThrows() const { return getCurrentContext().getConfig()->allowThrows(); } AssertionResult ResultBuilder::build() const { return build( *this ); } // CAVEAT: The returned AssertionResult stores a pointer to the argument expr, // a temporary DecomposedExpression, which in turn holds references to // operands, possibly temporary as well. // It should immediately be passed to handleResult; if the expression // needs to be reported, its string expansion must be composed before // the temporaries are destroyed. 
AssertionResult ResultBuilder::build( DecomposedExpression const& expr ) const { assert( m_data.resultType != ResultWas::Unknown ); AssertionResultData data = m_data; // Flip bool results if FalseTest flag is set if( isFalseTest( m_assertionInfo.resultDisposition ) ) { data.negate( expr.isBinaryExpression() ); } data.message = m_stream().oss.str(); data.decomposedExpression = &expr; // for lazy reconstruction return AssertionResult( m_assertionInfo, data ); } void ResultBuilder::reconstructExpression( std::string& dest ) const { dest = capturedExpressionWithSecondArgument(m_assertionInfo.capturedExpression, m_assertionInfo.secondArg); } void ResultBuilder::setExceptionGuard() { m_guardException = true; } void ResultBuilder::unsetExceptionGuard() { m_guardException = false; } } // end namespace Catch // #included from: catch_tag_alias_registry.hpp #define TWOBLUECUBES_CATCH_TAG_ALIAS_REGISTRY_HPP_INCLUDED namespace Catch { TagAliasRegistry::~TagAliasRegistry() {} Option TagAliasRegistry::find( std::string const& alias ) const { std::map::const_iterator it = m_registry.find( alias ); if( it != m_registry.end() ) return it->second; else return Option(); } std::string TagAliasRegistry::expandAliases( std::string const& unexpandedTestSpec ) const { std::string expandedTestSpec = unexpandedTestSpec; for( std::map::const_iterator it = m_registry.begin(), itEnd = m_registry.end(); it != itEnd; ++it ) { std::size_t pos = expandedTestSpec.find( it->first ); if( pos != std::string::npos ) { expandedTestSpec = expandedTestSpec.substr( 0, pos ) + it->second.tag + expandedTestSpec.substr( pos + it->first.size() ); } } return expandedTestSpec; } void TagAliasRegistry::add( std::string const& alias, std::string const& tag, SourceLineInfo const& lineInfo ) { if( !startsWith( alias, "[@" ) || !endsWith( alias, ']' ) ) { std::ostringstream oss; oss << Colour( Colour::Red ) << "error: tag alias, \"" << alias << "\" is not of the form [@alias name].\n" << Colour( Colour::FileName ) << 
lineInfo << '\n'; throw std::domain_error( oss.str().c_str() ); } if( !m_registry.insert( std::make_pair( alias, TagAlias( tag, lineInfo ) ) ).second ) { std::ostringstream oss; oss << Colour( Colour::Red ) << "error: tag alias, \"" << alias << "\" already registered.\n" << "\tFirst seen at " << Colour( Colour::Red ) << find(alias)->lineInfo << '\n' << Colour( Colour::Red ) << "\tRedefined at " << Colour( Colour::FileName) << lineInfo << '\n'; throw std::domain_error( oss.str().c_str() ); } } ITagAliasRegistry::~ITagAliasRegistry() {} ITagAliasRegistry const& ITagAliasRegistry::get() { return getRegistryHub().getTagAliasRegistry(); } RegistrarForTagAliases::RegistrarForTagAliases( char const* alias, char const* tag, SourceLineInfo const& lineInfo ) { getMutableRegistryHub().registerTagAlias( alias, tag, lineInfo ); } } // end namespace Catch // #included from: catch_matchers_string.hpp namespace Catch { namespace Matchers { namespace StdString { CasedString::CasedString( std::string const& str, CaseSensitive::Choice caseSensitivity ) : m_caseSensitivity( caseSensitivity ), m_str( adjustString( str ) ) {} std::string CasedString::adjustString( std::string const& str ) const { return m_caseSensitivity == CaseSensitive::No ? toLower( str ) : str; } std::string CasedString::caseSensitivitySuffix() const { return m_caseSensitivity == CaseSensitive::No ? 
" (case insensitive)" : std::string(); } StringMatcherBase::StringMatcherBase( std::string const& operation, CasedString const& comparator ) : m_comparator( comparator ), m_operation( operation ) { } std::string StringMatcherBase::describe() const { std::string description; description.reserve(5 + m_operation.size() + m_comparator.m_str.size() + m_comparator.caseSensitivitySuffix().size()); description += m_operation; description += ": \""; description += m_comparator.m_str; description += "\""; description += m_comparator.caseSensitivitySuffix(); return description; } EqualsMatcher::EqualsMatcher( CasedString const& comparator ) : StringMatcherBase( "equals", comparator ) {} bool EqualsMatcher::match( std::string const& source ) const { return m_comparator.adjustString( source ) == m_comparator.m_str; } ContainsMatcher::ContainsMatcher( CasedString const& comparator ) : StringMatcherBase( "contains", comparator ) {} bool ContainsMatcher::match( std::string const& source ) const { return contains( m_comparator.adjustString( source ), m_comparator.m_str ); } StartsWithMatcher::StartsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "starts with", comparator ) {} bool StartsWithMatcher::match( std::string const& source ) const { return startsWith( m_comparator.adjustString( source ), m_comparator.m_str ); } EndsWithMatcher::EndsWithMatcher( CasedString const& comparator ) : StringMatcherBase( "ends with", comparator ) {} bool EndsWithMatcher::match( std::string const& source ) const { return endsWith( m_comparator.adjustString( source ), m_comparator.m_str ); } } // namespace StdString StdString::EqualsMatcher Equals( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::EqualsMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::ContainsMatcher Contains( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::ContainsMatcher( StdString::CasedString( str, caseSensitivity) ); } 
StdString::EndsWithMatcher EndsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::EndsWithMatcher( StdString::CasedString( str, caseSensitivity) ); } StdString::StartsWithMatcher StartsWith( std::string const& str, CaseSensitive::Choice caseSensitivity ) { return StdString::StartsWithMatcher( StdString::CasedString( str, caseSensitivity) ); } } // namespace Matchers } // namespace Catch // #included from: ../reporters/catch_reporter_multi.hpp #define TWOBLUECUBES_CATCH_REPORTER_MULTI_HPP_INCLUDED namespace Catch { class MultipleReporters : public SharedImpl { typedef std::vector > Reporters; Reporters m_reporters; public: void add( Ptr const& reporter ) { m_reporters.push_back( reporter ); } public: // IStreamingReporter virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { return m_reporters[0]->getPreferences(); } virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->noMatchingTestCases( spec ); } virtual void testRunStarting( TestRunInfo const& testRunInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testRunStarting( testRunInfo ); } virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testGroupStarting( groupInfo ); } virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testCaseStarting( testInfo ); } virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->sectionStarting( sectionInfo ); } 
virtual void assertionStarting( AssertionInfo const& assertionInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->assertionStarting( assertionInfo ); } // The return value indicates if the messages buffer should be cleared: virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { bool clearBuffer = false; for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) clearBuffer |= (*it)->assertionEnded( assertionStats ); return clearBuffer; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->sectionEnded( sectionStats ); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testCaseEnded( testCaseStats ); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testGroupEnded( testGroupStats ); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->testRunEnded( testRunStats ); } virtual void skipTest( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { for( Reporters::const_iterator it = m_reporters.begin(), itEnd = m_reporters.end(); it != itEnd; ++it ) (*it)->skipTest( testInfo ); } virtual MultipleReporters* tryAsMulti() CATCH_OVERRIDE { return this; } }; Ptr addReporter( Ptr const& existingReporter, Ptr const& additionalReporter ) { Ptr resultingReporter; if( existingReporter ) { MultipleReporters* multi = existingReporter->tryAsMulti(); if( !multi ) { multi = new 
MultipleReporters; resultingReporter = Ptr( multi ); if( existingReporter ) multi->add( existingReporter ); } else resultingReporter = existingReporter; multi->add( additionalReporter ); } else resultingReporter = additionalReporter; return resultingReporter; } } // end namespace Catch // #included from: ../reporters/catch_reporter_xml.hpp #define TWOBLUECUBES_CATCH_REPORTER_XML_HPP_INCLUDED // #included from: catch_reporter_bases.hpp #define TWOBLUECUBES_CATCH_REPORTER_BASES_HPP_INCLUDED #include #include #include #include #include #include #include namespace Catch { namespace { std::string getFormattedDuration( double duration ) { std::stringstream ss; ss << std::setprecision(4) << duration; return ss.str(); } } struct StreamingReporterBase : SharedImpl { StreamingReporterBase( ReporterConfig const& _config ) : m_config( _config.fullConfig() ), stream( _config.stream() ) { m_reporterPrefs.shouldRedirectStdOut = false; } virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { return m_reporterPrefs; } virtual ~StreamingReporterBase() CATCH_OVERRIDE; virtual void noMatchingTestCases( std::string const& ) CATCH_OVERRIDE {} virtual void testRunStarting( TestRunInfo const& _testRunInfo ) CATCH_OVERRIDE { currentTestRunInfo = _testRunInfo; } virtual void testGroupStarting( GroupInfo const& _groupInfo ) CATCH_OVERRIDE { currentGroupInfo = _groupInfo; } virtual void testCaseStarting( TestCaseInfo const& _testInfo ) CATCH_OVERRIDE { currentTestCaseInfo = _testInfo; } virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE { m_sectionStack.push_back( _sectionInfo ); } virtual void sectionEnded( SectionStats const& /* _sectionStats */ ) CATCH_OVERRIDE { m_sectionStack.pop_back(); } virtual void testCaseEnded( TestCaseStats const& /* _testCaseStats */ ) CATCH_OVERRIDE { currentTestCaseInfo.reset(); } virtual void testGroupEnded( TestGroupStats const& /* _testGroupStats */ ) CATCH_OVERRIDE { currentGroupInfo.reset(); } virtual void 
testRunEnded( TestRunStats const& /* _testRunStats */ ) CATCH_OVERRIDE { currentTestCaseInfo.reset(); currentGroupInfo.reset(); currentTestRunInfo.reset(); } virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE { // Don't do anything with this by default. // It can optionally be overridden in the derived class. } Ptr m_config; std::ostream& stream; LazyStat currentTestRunInfo; LazyStat currentGroupInfo; LazyStat currentTestCaseInfo; std::vector m_sectionStack; ReporterPreferences m_reporterPrefs; }; struct CumulativeReporterBase : SharedImpl { template struct Node : SharedImpl<> { explicit Node( T const& _value ) : value( _value ) {} virtual ~Node() {} typedef std::vector > ChildNodes; T value; ChildNodes children; }; struct SectionNode : SharedImpl<> { explicit SectionNode( SectionStats const& _stats ) : stats( _stats ) {} virtual ~SectionNode(); bool operator == ( SectionNode const& other ) const { return stats.sectionInfo.lineInfo == other.stats.sectionInfo.lineInfo; } bool operator == ( Ptr const& other ) const { return operator==( *other ); } SectionStats stats; typedef std::vector > ChildSections; typedef std::vector Assertions; ChildSections childSections; Assertions assertions; std::string stdOut; std::string stdErr; }; struct BySectionInfo { BySectionInfo( SectionInfo const& other ) : m_other( other ) {} BySectionInfo( BySectionInfo const& other ) : m_other( other.m_other ) {} bool operator() ( Ptr const& node ) const { return node->stats.sectionInfo.lineInfo == m_other.lineInfo; } private: void operator=( BySectionInfo const& ); SectionInfo const& m_other; }; typedef Node TestCaseNode; typedef Node TestGroupNode; typedef Node TestRunNode; CumulativeReporterBase( ReporterConfig const& _config ) : m_config( _config.fullConfig() ), stream( _config.stream() ) { m_reporterPrefs.shouldRedirectStdOut = false; } ~CumulativeReporterBase(); virtual ReporterPreferences getPreferences() const CATCH_OVERRIDE { return m_reporterPrefs; } virtual void 
testRunStarting( TestRunInfo const& ) CATCH_OVERRIDE {} virtual void testGroupStarting( GroupInfo const& ) CATCH_OVERRIDE {} virtual void testCaseStarting( TestCaseInfo const& ) CATCH_OVERRIDE {} virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { SectionStats incompleteStats( sectionInfo, Counts(), 0, false ); Ptr node; if( m_sectionStack.empty() ) { if( !m_rootSection ) m_rootSection = new SectionNode( incompleteStats ); node = m_rootSection; } else { SectionNode& parentNode = *m_sectionStack.back(); SectionNode::ChildSections::const_iterator it = std::find_if( parentNode.childSections.begin(), parentNode.childSections.end(), BySectionInfo( sectionInfo ) ); if( it == parentNode.childSections.end() ) { node = new SectionNode( incompleteStats ); parentNode.childSections.push_back( node ); } else node = *it; } m_sectionStack.push_back( node ); m_deepestSection = node; } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {} virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { assert( !m_sectionStack.empty() ); SectionNode& sectionNode = *m_sectionStack.back(); sectionNode.assertions.push_back( assertionStats ); // AssertionResult holds a pointer to a temporary DecomposedExpression, // which getExpandedExpression() calls to build the expression string. // Our section stack copy of the assertionResult will likely outlive the // temporary, so it must be expanded or discarded now to avoid calling // a destroyed object later. 
prepareExpandedExpression( sectionNode.assertions.back().assertionResult ); return true; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { assert( !m_sectionStack.empty() ); SectionNode& node = *m_sectionStack.back(); node.stats = sectionStats; m_sectionStack.pop_back(); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { Ptr node = new TestCaseNode( testCaseStats ); assert( m_sectionStack.size() == 0 ); node->children.push_back( m_rootSection ); m_testCases.push_back( node ); m_rootSection.reset(); assert( m_deepestSection ); m_deepestSection->stdOut = testCaseStats.stdOut; m_deepestSection->stdErr = testCaseStats.stdErr; } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { Ptr node = new TestGroupNode( testGroupStats ); node->children.swap( m_testCases ); m_testGroups.push_back( node ); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { Ptr node = new TestRunNode( testRunStats ); node->children.swap( m_testGroups ); m_testRuns.push_back( node ); testRunEndedCumulative(); } virtual void testRunEndedCumulative() = 0; virtual void skipTest( TestCaseInfo const& ) CATCH_OVERRIDE {} virtual void prepareExpandedExpression( AssertionResult& result ) const { if( result.isOk() ) result.discardDecomposedExpression(); else result.expandDecomposedExpression(); } Ptr m_config; std::ostream& stream; std::vector m_assertions; std::vector > > m_sections; std::vector > m_testCases; std::vector > m_testGroups; std::vector > m_testRuns; Ptr m_rootSection; Ptr m_deepestSection; std::vector > m_sectionStack; ReporterPreferences m_reporterPrefs; }; template char const* getLineOfChars() { static char line[CATCH_CONFIG_CONSOLE_WIDTH] = {0}; if( !*line ) { std::memset( line, C, CATCH_CONFIG_CONSOLE_WIDTH-1 ); line[CATCH_CONFIG_CONSOLE_WIDTH-1] = 0; } return line; } struct TestEventListenerBase : StreamingReporterBase { TestEventListenerBase( ReporterConfig const& 
_config ) : StreamingReporterBase( _config ) {} virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE {} virtual bool assertionEnded( AssertionStats const& ) CATCH_OVERRIDE { return false; } }; } // end namespace Catch // #included from: ../internal/catch_reporter_registrars.hpp #define TWOBLUECUBES_CATCH_REPORTER_REGISTRARS_HPP_INCLUDED namespace Catch { template class LegacyReporterRegistrar { class ReporterFactory : public IReporterFactory { virtual IStreamingReporter* create( ReporterConfig const& config ) const { return new LegacyReporterAdapter( new T( config ) ); } virtual std::string getDescription() const { return T::getDescription(); } }; public: LegacyReporterRegistrar( std::string const& name ) { getMutableRegistryHub().registerReporter( name, new ReporterFactory() ); } }; template class ReporterRegistrar { class ReporterFactory : public SharedImpl { // *** Please Note ***: // - If you end up here looking at a compiler error because it's trying to register // your custom reporter class be aware that the native reporter interface has changed // to IStreamingReporter. The "legacy" interface, IReporter, is still supported via // an adapter. Just use REGISTER_LEGACY_REPORTER to take advantage of the adapter. // However please consider updating to the new interface as the old one is now // deprecated and will probably be removed quite soon! // Please contact me via github if you have any questions at all about this. // In fact, ideally, please contact me anyway to let me know you've hit this - as I have // no idea who is actually using custom reporters at all (possibly no-one!). // The new interface is designed to minimise exposure to interface changes in the future. 
virtual IStreamingReporter* create( ReporterConfig const& config ) const { return new T( config ); } virtual std::string getDescription() const { return T::getDescription(); } }; public: ReporterRegistrar( std::string const& name ) { getMutableRegistryHub().registerReporter( name, new ReporterFactory() ); } }; template class ListenerRegistrar { class ListenerFactory : public SharedImpl { virtual IStreamingReporter* create( ReporterConfig const& config ) const { return new T( config ); } virtual std::string getDescription() const { return std::string(); } }; public: ListenerRegistrar() { getMutableRegistryHub().registerListener( new ListenerFactory() ); } }; } #define INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) \ namespace{ Catch::LegacyReporterRegistrar catch_internal_RegistrarFor##reporterType( name ); } #define INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) \ namespace{ Catch::ReporterRegistrar catch_internal_RegistrarFor##reporterType( name ); } // Deprecated - use the form without INTERNAL_ #define INTERNAL_CATCH_REGISTER_LISTENER( listenerType ) \ namespace{ Catch::ListenerRegistrar catch_internal_RegistrarFor##listenerType; } #define CATCH_REGISTER_LISTENER( listenerType ) \ namespace{ Catch::ListenerRegistrar catch_internal_RegistrarFor##listenerType; } // #included from: ../internal/catch_xmlwriter.hpp #define TWOBLUECUBES_CATCH_XMLWRITER_HPP_INCLUDED #include #include #include #include namespace Catch { class XmlEncode { public: enum ForWhat { ForTextNodes, ForAttributes }; XmlEncode( std::string const& str, ForWhat forWhat = ForTextNodes ) : m_str( str ), m_forWhat( forWhat ) {} void encodeTo( std::ostream& os ) const { // Apostrophe escaping not necessary if we always use " to write attributes // (see: http://www.w3.org/TR/xml/#syntax) for( std::size_t i = 0; i < m_str.size(); ++ i ) { char c = m_str[i]; switch( c ) { case '<': os << "<"; break; case '&': os << "&"; break; case '>': // See: http://www.w3.org/TR/xml/#syntax if( i 
> 2 && m_str[i-1] == ']' && m_str[i-2] == ']' ) os << ">"; else os << c; break; case '\"': if( m_forWhat == ForAttributes ) os << """; else os << c; break; default: // Escape control chars - based on contribution by @espenalb in PR #465 and // by @mrpi PR #588 if ( ( c >= 0 && c < '\x09' ) || ( c > '\x0D' && c < '\x20') || c=='\x7F' ) { // see http://stackoverflow.com/questions/404107/why-are-control-characters-illegal-in-xml-1-0 os << "\\x" << std::uppercase << std::hex << std::setfill('0') << std::setw(2) << static_cast( c ); } else os << c; } } } friend std::ostream& operator << ( std::ostream& os, XmlEncode const& xmlEncode ) { xmlEncode.encodeTo( os ); return os; } private: std::string m_str; ForWhat m_forWhat; }; class XmlWriter { public: class ScopedElement { public: ScopedElement( XmlWriter* writer ) : m_writer( writer ) {} ScopedElement( ScopedElement const& other ) : m_writer( other.m_writer ){ other.m_writer = CATCH_NULL; } ~ScopedElement() { if( m_writer ) m_writer->endElement(); } ScopedElement& writeText( std::string const& text, bool indent = true ) { m_writer->writeText( text, indent ); return *this; } template ScopedElement& writeAttribute( std::string const& name, T const& attribute ) { m_writer->writeAttribute( name, attribute ); return *this; } private: mutable XmlWriter* m_writer; }; XmlWriter() : m_tagIsOpen( false ), m_needsNewline( false ), m_os( Catch::cout() ) { writeDeclaration(); } XmlWriter( std::ostream& os ) : m_tagIsOpen( false ), m_needsNewline( false ), m_os( os ) { writeDeclaration(); } ~XmlWriter() { while( !m_tags.empty() ) endElement(); } XmlWriter& startElement( std::string const& name ) { ensureTagClosed(); newlineIfNecessary(); m_os << m_indent << '<' << name; m_tags.push_back( name ); m_indent += " "; m_tagIsOpen = true; return *this; } ScopedElement scopedElement( std::string const& name ) { ScopedElement scoped( this ); startElement( name ); return scoped; } XmlWriter& endElement() { newlineIfNecessary(); m_indent = 
m_indent.substr( 0, m_indent.size()-2 ); if( m_tagIsOpen ) { m_os << "/>"; m_tagIsOpen = false; } else { m_os << m_indent << ""; } m_os << std::endl; m_tags.pop_back(); return *this; } XmlWriter& writeAttribute( std::string const& name, std::string const& attribute ) { if( !name.empty() && !attribute.empty() ) m_os << ' ' << name << "=\"" << XmlEncode( attribute, XmlEncode::ForAttributes ) << '"'; return *this; } XmlWriter& writeAttribute( std::string const& name, bool attribute ) { m_os << ' ' << name << "=\"" << ( attribute ? "true" : "false" ) << '"'; return *this; } template XmlWriter& writeAttribute( std::string const& name, T const& attribute ) { std::ostringstream oss; oss << attribute; return writeAttribute( name, oss.str() ); } XmlWriter& writeText( std::string const& text, bool indent = true ) { if( !text.empty() ){ bool tagWasOpen = m_tagIsOpen; ensureTagClosed(); if( tagWasOpen && indent ) m_os << m_indent; m_os << XmlEncode( text ); m_needsNewline = true; } return *this; } XmlWriter& writeComment( std::string const& text ) { ensureTagClosed(); m_os << m_indent << ""; m_needsNewline = true; return *this; } void writeStylesheetRef( std::string const& url ) { m_os << "\n"; } XmlWriter& writeBlankLine() { ensureTagClosed(); m_os << '\n'; return *this; } void ensureTagClosed() { if( m_tagIsOpen ) { m_os << ">" << std::endl; m_tagIsOpen = false; } } private: XmlWriter( XmlWriter const& ); void operator=( XmlWriter const& ); void writeDeclaration() { m_os << "\n"; } void newlineIfNecessary() { if( m_needsNewline ) { m_os << std::endl; m_needsNewline = false; } } bool m_tagIsOpen; bool m_needsNewline; std::vector m_tags; std::string m_indent; std::ostream& m_os; }; } namespace Catch { class XmlReporter : public StreamingReporterBase { public: XmlReporter( ReporterConfig const& _config ) : StreamingReporterBase( _config ), m_xml(_config.stream()), m_sectionDepth( 0 ) { m_reporterPrefs.shouldRedirectStdOut = true; } virtual ~XmlReporter() CATCH_OVERRIDE; static 
std::string getDescription() { return "Reports test results as an XML document"; } virtual std::string getStylesheetRef() const { return std::string(); } void writeSourceInfo( SourceLineInfo const& sourceInfo ) { m_xml .writeAttribute( "filename", sourceInfo.file ) .writeAttribute( "line", sourceInfo.line ); } public: // StreamingReporterBase virtual void noMatchingTestCases( std::string const& s ) CATCH_OVERRIDE { StreamingReporterBase::noMatchingTestCases( s ); } virtual void testRunStarting( TestRunInfo const& testInfo ) CATCH_OVERRIDE { StreamingReporterBase::testRunStarting( testInfo ); std::string stylesheetRef = getStylesheetRef(); if( !stylesheetRef.empty() ) m_xml.writeStylesheetRef( stylesheetRef ); m_xml.startElement( "Catch" ); if( !m_config->name().empty() ) m_xml.writeAttribute( "name", m_config->name() ); } virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { StreamingReporterBase::testGroupStarting( groupInfo ); m_xml.startElement( "Group" ) .writeAttribute( "name", groupInfo.name ); } virtual void testCaseStarting( TestCaseInfo const& testInfo ) CATCH_OVERRIDE { StreamingReporterBase::testCaseStarting(testInfo); m_xml.startElement( "TestCase" ) .writeAttribute( "name", trim( testInfo.name ) ) .writeAttribute( "description", testInfo.description ) .writeAttribute( "tags", testInfo.tagsAsString ); writeSourceInfo( testInfo.lineInfo ); if ( m_config->showDurations() == ShowDurations::Always ) m_testCaseTimer.start(); m_xml.ensureTagClosed(); } virtual void sectionStarting( SectionInfo const& sectionInfo ) CATCH_OVERRIDE { StreamingReporterBase::sectionStarting( sectionInfo ); if( m_sectionDepth++ > 0 ) { m_xml.startElement( "Section" ) .writeAttribute( "name", trim( sectionInfo.name ) ) .writeAttribute( "description", sectionInfo.description ); writeSourceInfo( sectionInfo.lineInfo ); m_xml.ensureTagClosed(); } } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { } virtual bool assertionEnded( 
AssertionStats const& assertionStats ) CATCH_OVERRIDE { AssertionResult const& result = assertionStats.assertionResult; bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); if( includeResults ) { // Print any info messages in tags. for( std::vector::const_iterator it = assertionStats.infoMessages.begin(), itEnd = assertionStats.infoMessages.end(); it != itEnd; ++it ) { if( it->type == ResultWas::Info ) { m_xml.scopedElement( "Info" ) .writeText( it->message ); } else if ( it->type == ResultWas::Warning ) { m_xml.scopedElement( "Warning" ) .writeText( it->message ); } } } // Drop out if result was successful but we're not printing them. if( !includeResults && result.getResultType() != ResultWas::Warning ) return true; // Print the expression if there is one. if( result.hasExpression() ) { m_xml.startElement( "Expression" ) .writeAttribute( "success", result.succeeded() ) .writeAttribute( "type", result.getTestMacroName() ); writeSourceInfo( result.getSourceInfo() ); m_xml.scopedElement( "Original" ) .writeText( result.getExpression() ); m_xml.scopedElement( "Expanded" ) .writeText( result.getExpandedExpression() ); } // And... Print a result applicable to each result type. 
switch( result.getResultType() ) { case ResultWas::ThrewException: m_xml.startElement( "Exception" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; case ResultWas::FatalErrorCondition: m_xml.startElement( "FatalErrorCondition" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; case ResultWas::Info: m_xml.scopedElement( "Info" ) .writeText( result.getMessage() ); break; case ResultWas::Warning: // Warning will already have been written break; case ResultWas::ExplicitFailure: m_xml.startElement( "Failure" ); writeSourceInfo( result.getSourceInfo() ); m_xml.writeText( result.getMessage() ); m_xml.endElement(); break; default: break; } if( result.hasExpression() ) m_xml.endElement(); return true; } virtual void sectionEnded( SectionStats const& sectionStats ) CATCH_OVERRIDE { StreamingReporterBase::sectionEnded( sectionStats ); if( --m_sectionDepth > 0 ) { XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResults" ); e.writeAttribute( "successes", sectionStats.assertions.passed ); e.writeAttribute( "failures", sectionStats.assertions.failed ); e.writeAttribute( "expectedFailures", sectionStats.assertions.failedButOk ); if ( m_config->showDurations() == ShowDurations::Always ) e.writeAttribute( "durationInSeconds", sectionStats.durationInSeconds ); m_xml.endElement(); } } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { StreamingReporterBase::testCaseEnded( testCaseStats ); XmlWriter::ScopedElement e = m_xml.scopedElement( "OverallResult" ); e.writeAttribute( "success", testCaseStats.totals.assertions.allOk() ); if ( m_config->showDurations() == ShowDurations::Always ) e.writeAttribute( "durationInSeconds", m_testCaseTimer.getElapsedSeconds() ); if( !testCaseStats.stdOut.empty() ) m_xml.scopedElement( "StdOut" ).writeText( trim( testCaseStats.stdOut ), false ); if( !testCaseStats.stdErr.empty() ) 
m_xml.scopedElement( "StdErr" ).writeText( trim( testCaseStats.stdErr ), false ); m_xml.endElement(); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { StreamingReporterBase::testGroupEnded( testGroupStats ); // TODO: Check testGroupStats.aborting and act accordingly. m_xml.scopedElement( "OverallResults" ) .writeAttribute( "successes", testGroupStats.totals.assertions.passed ) .writeAttribute( "failures", testGroupStats.totals.assertions.failed ) .writeAttribute( "expectedFailures", testGroupStats.totals.assertions.failedButOk ); m_xml.endElement(); } virtual void testRunEnded( TestRunStats const& testRunStats ) CATCH_OVERRIDE { StreamingReporterBase::testRunEnded( testRunStats ); m_xml.scopedElement( "OverallResults" ) .writeAttribute( "successes", testRunStats.totals.assertions.passed ) .writeAttribute( "failures", testRunStats.totals.assertions.failed ) .writeAttribute( "expectedFailures", testRunStats.totals.assertions.failedButOk ); m_xml.endElement(); } private: Timer m_testCaseTimer; XmlWriter m_xml; int m_sectionDepth; }; INTERNAL_CATCH_REGISTER_REPORTER( "xml", XmlReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_junit.hpp #define TWOBLUECUBES_CATCH_REPORTER_JUNIT_HPP_INCLUDED #include namespace Catch { namespace { std::string getCurrentTimestamp() { // Beware, this is not reentrant because of backward compatibility issues // Also, UTC only, again because of backward compatibility (%z is C++11) time_t rawtime; std::time(&rawtime); const size_t timeStampSize = sizeof("2017-01-16T17:06:45Z"); #ifdef _MSC_VER std::tm timeInfo = {}; gmtime_s(&timeInfo, &rawtime); #else std::tm* timeInfo; timeInfo = std::gmtime(&rawtime); #endif char timeStamp[timeStampSize]; const char * const fmt = "%Y-%m-%dT%H:%M:%SZ"; #ifdef _MSC_VER std::strftime(timeStamp, timeStampSize, fmt, &timeInfo); #else std::strftime(timeStamp, timeStampSize, fmt, timeInfo); #endif return std::string(timeStamp); } } class 
JunitReporter : public CumulativeReporterBase { public: JunitReporter( ReporterConfig const& _config ) : CumulativeReporterBase( _config ), xml( _config.stream() ), m_okToFail( false ) { m_reporterPrefs.shouldRedirectStdOut = true; } virtual ~JunitReporter() CATCH_OVERRIDE; static std::string getDescription() { return "Reports test results in an XML format that looks like Ant's junitreport target"; } virtual void noMatchingTestCases( std::string const& /*spec*/ ) CATCH_OVERRIDE {} virtual void testRunStarting( TestRunInfo const& runInfo ) CATCH_OVERRIDE { CumulativeReporterBase::testRunStarting( runInfo ); xml.startElement( "testsuites" ); } virtual void testGroupStarting( GroupInfo const& groupInfo ) CATCH_OVERRIDE { suiteTimer.start(); stdOutForSuite.str(""); stdErrForSuite.str(""); unexpectedExceptions = 0; CumulativeReporterBase::testGroupStarting( groupInfo ); } virtual void testCaseStarting( TestCaseInfo const& testCaseInfo ) CATCH_OVERRIDE { m_okToFail = testCaseInfo.okToFail(); } virtual bool assertionEnded( AssertionStats const& assertionStats ) CATCH_OVERRIDE { if( assertionStats.assertionResult.getResultType() == ResultWas::ThrewException && !m_okToFail ) unexpectedExceptions++; return CumulativeReporterBase::assertionEnded( assertionStats ); } virtual void testCaseEnded( TestCaseStats const& testCaseStats ) CATCH_OVERRIDE { stdOutForSuite << testCaseStats.stdOut; stdErrForSuite << testCaseStats.stdErr; CumulativeReporterBase::testCaseEnded( testCaseStats ); } virtual void testGroupEnded( TestGroupStats const& testGroupStats ) CATCH_OVERRIDE { double suiteTime = suiteTimer.getElapsedSeconds(); CumulativeReporterBase::testGroupEnded( testGroupStats ); writeGroup( *m_testGroups.back(), suiteTime ); } virtual void testRunEndedCumulative() CATCH_OVERRIDE { xml.endElement(); } void writeGroup( TestGroupNode const& groupNode, double suiteTime ) { XmlWriter::ScopedElement e = xml.scopedElement( "testsuite" ); TestGroupStats const& stats = groupNode.value; 
xml.writeAttribute( "name", stats.groupInfo.name ); xml.writeAttribute( "errors", unexpectedExceptions ); xml.writeAttribute( "failures", stats.totals.assertions.failed-unexpectedExceptions ); xml.writeAttribute( "tests", stats.totals.assertions.total() ); xml.writeAttribute( "hostname", "tbd" ); // !TBD if( m_config->showDurations() == ShowDurations::Never ) xml.writeAttribute( "time", "" ); else xml.writeAttribute( "time", suiteTime ); xml.writeAttribute( "timestamp", getCurrentTimestamp() ); // Write test cases for( TestGroupNode::ChildNodes::const_iterator it = groupNode.children.begin(), itEnd = groupNode.children.end(); it != itEnd; ++it ) writeTestCase( **it ); xml.scopedElement( "system-out" ).writeText( trim( stdOutForSuite.str() ), false ); xml.scopedElement( "system-err" ).writeText( trim( stdErrForSuite.str() ), false ); } void writeTestCase( TestCaseNode const& testCaseNode ) { TestCaseStats const& stats = testCaseNode.value; // All test cases have exactly one section - which represents the // test case itself. 
That section may have 0-n nested sections assert( testCaseNode.children.size() == 1 ); SectionNode const& rootSection = *testCaseNode.children.front(); std::string className = stats.testInfo.className; if( className.empty() ) { if( rootSection.childSections.empty() ) className = "global"; } writeSection( className, "", rootSection ); } void writeSection( std::string const& className, std::string const& rootName, SectionNode const& sectionNode ) { std::string name = trim( sectionNode.stats.sectionInfo.name ); if( !rootName.empty() ) name = rootName + '/' + name; if( !sectionNode.assertions.empty() || !sectionNode.stdOut.empty() || !sectionNode.stdErr.empty() ) { XmlWriter::ScopedElement e = xml.scopedElement( "testcase" ); if( className.empty() ) { xml.writeAttribute( "classname", name ); xml.writeAttribute( "name", "root" ); } else { xml.writeAttribute( "classname", className ); xml.writeAttribute( "name", name ); } xml.writeAttribute( "time", Catch::toString( sectionNode.stats.durationInSeconds ) ); writeAssertions( sectionNode ); if( !sectionNode.stdOut.empty() ) xml.scopedElement( "system-out" ).writeText( trim( sectionNode.stdOut ), false ); if( !sectionNode.stdErr.empty() ) xml.scopedElement( "system-err" ).writeText( trim( sectionNode.stdErr ), false ); } for( SectionNode::ChildSections::const_iterator it = sectionNode.childSections.begin(), itEnd = sectionNode.childSections.end(); it != itEnd; ++it ) if( className.empty() ) writeSection( name, "", **it ); else writeSection( className, name, **it ); } void writeAssertions( SectionNode const& sectionNode ) { for( SectionNode::Assertions::const_iterator it = sectionNode.assertions.begin(), itEnd = sectionNode.assertions.end(); it != itEnd; ++it ) writeAssertion( *it ); } void writeAssertion( AssertionStats const& stats ) { AssertionResult const& result = stats.assertionResult; if( !result.isOk() ) { std::string elementName; switch( result.getResultType() ) { case ResultWas::ThrewException: case 
ResultWas::FatalErrorCondition: elementName = "error"; break; case ResultWas::ExplicitFailure: elementName = "failure"; break; case ResultWas::ExpressionFailed: elementName = "failure"; break; case ResultWas::DidntThrowException: elementName = "failure"; break; // We should never see these here: case ResultWas::Info: case ResultWas::Warning: case ResultWas::Ok: case ResultWas::Unknown: case ResultWas::FailureBit: case ResultWas::Exception: elementName = "internalError"; break; } XmlWriter::ScopedElement e = xml.scopedElement( elementName ); xml.writeAttribute( "message", result.getExpandedExpression() ); xml.writeAttribute( "type", result.getTestMacroName() ); std::ostringstream oss; if( !result.getMessage().empty() ) oss << result.getMessage() << '\n'; for( std::vector::const_iterator it = stats.infoMessages.begin(), itEnd = stats.infoMessages.end(); it != itEnd; ++it ) if( it->type == ResultWas::Info ) oss << it->message << '\n'; oss << "at " << result.getSourceInfo(); xml.writeText( oss.str(), false ); } } XmlWriter xml; Timer suiteTimer; std::ostringstream stdOutForSuite; std::ostringstream stdErrForSuite; unsigned int unexpectedExceptions; bool m_okToFail; }; INTERNAL_CATCH_REGISTER_REPORTER( "junit", JunitReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_console.hpp #define TWOBLUECUBES_CATCH_REPORTER_CONSOLE_HPP_INCLUDED #include #include namespace Catch { struct ConsoleReporter : StreamingReporterBase { ConsoleReporter( ReporterConfig const& _config ) : StreamingReporterBase( _config ), m_headerPrinted( false ) {} virtual ~ConsoleReporter() CATCH_OVERRIDE; static std::string getDescription() { return "Reports test results as plain lines of text"; } virtual void noMatchingTestCases( std::string const& spec ) CATCH_OVERRIDE { stream << "No test cases matched '" << spec << '\'' << std::endl; } virtual void assertionStarting( AssertionInfo const& ) CATCH_OVERRIDE { } virtual bool assertionEnded( AssertionStats const& 
_assertionStats ) CATCH_OVERRIDE { AssertionResult const& result = _assertionStats.assertionResult; bool includeResults = m_config->includeSuccessfulResults() || !result.isOk(); // Drop out if result was successful but we're not printing them. if( !includeResults && result.getResultType() != ResultWas::Warning ) return false; lazyPrint(); AssertionPrinter printer( stream, _assertionStats, includeResults ); printer.print(); stream << std::endl; return true; } virtual void sectionStarting( SectionInfo const& _sectionInfo ) CATCH_OVERRIDE { m_headerPrinted = false; StreamingReporterBase::sectionStarting( _sectionInfo ); } virtual void sectionEnded( SectionStats const& _sectionStats ) CATCH_OVERRIDE { if( _sectionStats.missingAssertions ) { lazyPrint(); Colour colour( Colour::ResultError ); if( m_sectionStack.size() > 1 ) stream << "\nNo assertions in section"; else stream << "\nNo assertions in test case"; stream << " '" << _sectionStats.sectionInfo.name << "'\n" << std::endl; } if( m_config->showDurations() == ShowDurations::Always ) { stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; } if( m_headerPrinted ) { m_headerPrinted = false; } StreamingReporterBase::sectionEnded( _sectionStats ); } virtual void testCaseEnded( TestCaseStats const& _testCaseStats ) CATCH_OVERRIDE { StreamingReporterBase::testCaseEnded( _testCaseStats ); m_headerPrinted = false; } virtual void testGroupEnded( TestGroupStats const& _testGroupStats ) CATCH_OVERRIDE { if( currentGroupInfo.used ) { printSummaryDivider(); stream << "Summary for group '" << _testGroupStats.groupInfo.name << "':\n"; printTotals( _testGroupStats.totals ); stream << '\n' << std::endl; } StreamingReporterBase::testGroupEnded( _testGroupStats ); } virtual void testRunEnded( TestRunStats const& _testRunStats ) CATCH_OVERRIDE { printTotalsDivider( _testRunStats.totals ); printTotals( _testRunStats.totals ); stream << std::endl; 
StreamingReporterBase::testRunEnded( _testRunStats ); } private: class AssertionPrinter { void operator= ( AssertionPrinter const& ); public: AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) : stream( _stream ), stats( _stats ), result( _stats.assertionResult ), colour( Colour::None ), message( result.getMessage() ), messages( _stats.infoMessages ), printInfoMessages( _printInfoMessages ) { switch( result.getResultType() ) { case ResultWas::Ok: colour = Colour::Success; passOrFail = "PASSED"; //if( result.hasMessage() ) if( _stats.infoMessages.size() == 1 ) messageLabel = "with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "with messages"; break; case ResultWas::ExpressionFailed: if( result.isOk() ) { colour = Colour::Success; passOrFail = "FAILED - but was ok"; } else { colour = Colour::Error; passOrFail = "FAILED"; } if( _stats.infoMessages.size() == 1 ) messageLabel = "with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "with messages"; break; case ResultWas::ThrewException: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "due to unexpected exception with "; if (_stats.infoMessages.size() == 1) messageLabel += "message"; if (_stats.infoMessages.size() > 1) messageLabel += "messages"; break; case ResultWas::FatalErrorCondition: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "due to a fatal error condition"; break; case ResultWas::DidntThrowException: colour = Colour::Error; passOrFail = "FAILED"; messageLabel = "because no exception was thrown where one was expected"; break; case ResultWas::Info: messageLabel = "info"; break; case ResultWas::Warning: messageLabel = "warning"; break; case ResultWas::ExplicitFailure: passOrFail = "FAILED"; colour = Colour::Error; if( _stats.infoMessages.size() == 1 ) messageLabel = "explicitly with message"; if( _stats.infoMessages.size() > 1 ) messageLabel = "explicitly with messages"; break; // These cases are here to 
prevent compiler warnings case ResultWas::Unknown: case ResultWas::FailureBit: case ResultWas::Exception: passOrFail = "** internal error **"; colour = Colour::Error; break; } } void print() const { printSourceInfo(); if( stats.totals.assertions.total() > 0 ) { if( result.isOk() ) stream << '\n'; printResultType(); printOriginalExpression(); printReconstructedExpression(); } else { stream << '\n'; } printMessage(); } private: void printResultType() const { if( !passOrFail.empty() ) { Colour colourGuard( colour ); stream << passOrFail << ":\n"; } } void printOriginalExpression() const { if( result.hasExpression() ) { Colour colourGuard( Colour::OriginalExpression ); stream << " "; stream << result.getExpressionInMacro(); stream << '\n'; } } void printReconstructedExpression() const { if( result.hasExpandedExpression() ) { stream << "with expansion:\n"; Colour colourGuard( Colour::ReconstructedExpression ); stream << Text( result.getExpandedExpression(), TextAttributes().setIndent(2) ) << '\n'; } } void printMessage() const { if( !messageLabel.empty() ) stream << messageLabel << ':' << '\n'; for( std::vector::const_iterator it = messages.begin(), itEnd = messages.end(); it != itEnd; ++it ) { // If this assertion is a warning ignore any INFO messages if( printInfoMessages || it->type != ResultWas::Info ) stream << Text( it->message, TextAttributes().setIndent(2) ) << '\n'; } } void printSourceInfo() const { Colour colourGuard( Colour::FileName ); stream << result.getSourceInfo() << ": "; } std::ostream& stream; AssertionStats const& stats; AssertionResult const& result; Colour::Code colour; std::string passOrFail; std::string messageLabel; std::string message; std::vector messages; bool printInfoMessages; }; void lazyPrint() { if( !currentTestRunInfo.used ) lazyPrintRunInfo(); if( !currentGroupInfo.used ) lazyPrintGroupInfo(); if( !m_headerPrinted ) { printTestCaseAndSectionHeader(); m_headerPrinted = true; } } void lazyPrintRunInfo() { stream << '\n' << 
getLineOfChars<'~'>() << '\n'; Colour colour( Colour::SecondaryText ); stream << currentTestRunInfo->name << " is a Catch v" << libraryVersion() << " host application.\n" << "Run with -? for options\n\n"; if( m_config->rngSeed() != 0 ) stream << "Randomness seeded to: " << m_config->rngSeed() << "\n\n"; currentTestRunInfo.used = true; } void lazyPrintGroupInfo() { if( !currentGroupInfo->name.empty() && currentGroupInfo->groupsCounts > 1 ) { printClosedHeader( "Group: " + currentGroupInfo->name ); currentGroupInfo.used = true; } } void printTestCaseAndSectionHeader() { assert( !m_sectionStack.empty() ); printOpenHeader( currentTestCaseInfo->name ); if( m_sectionStack.size() > 1 ) { Colour colourGuard( Colour::Headers ); std::vector::const_iterator it = m_sectionStack.begin()+1, // Skip first section (test case) itEnd = m_sectionStack.end(); for( ; it != itEnd; ++it ) printHeaderString( it->name, 2 ); } SourceLineInfo lineInfo = m_sectionStack.back().lineInfo; if( !lineInfo.empty() ){ stream << getLineOfChars<'-'>() << '\n'; Colour colourGuard( Colour::FileName ); stream << lineInfo << '\n'; } stream << getLineOfChars<'.'>() << '\n' << std::endl; } void printClosedHeader( std::string const& _name ) { printOpenHeader( _name ); stream << getLineOfChars<'.'>() << '\n'; } void printOpenHeader( std::string const& _name ) { stream << getLineOfChars<'-'>() << '\n'; { Colour colourGuard( Colour::Headers ); printHeaderString( _name ); } } // if string has a : in first line will set indent to follow it on // subsequent lines void printHeaderString( std::string const& _string, std::size_t indent = 0 ) { std::size_t i = _string.find( ": " ); if( i != std::string::npos ) i+=2; else i = 0; stream << Text( _string, TextAttributes() .setIndent( indent+i) .setInitialIndent( indent ) ) << '\n'; } struct SummaryColumn { SummaryColumn( std::string const& _label, Colour::Code _colour ) : label( _label ), colour( _colour ) {} SummaryColumn addRow( std::size_t count ) { std::ostringstream 
oss; oss << count; std::string row = oss.str(); for( std::vector::iterator it = rows.begin(); it != rows.end(); ++it ) { while( it->size() < row.size() ) *it = ' ' + *it; while( it->size() > row.size() ) row = ' ' + row; } rows.push_back( row ); return *this; } std::string label; Colour::Code colour; std::vector rows; }; void printTotals( Totals const& totals ) { if( totals.testCases.total() == 0 ) { stream << Colour( Colour::Warning ) << "No tests ran\n"; } else if( totals.assertions.total() > 0 && totals.testCases.allPassed() ) { stream << Colour( Colour::ResultSuccess ) << "All tests passed"; stream << " (" << pluralise( totals.assertions.passed, "assertion" ) << " in " << pluralise( totals.testCases.passed, "test case" ) << ')' << '\n'; } else { std::vector columns; columns.push_back( SummaryColumn( "", Colour::None ) .addRow( totals.testCases.total() ) .addRow( totals.assertions.total() ) ); columns.push_back( SummaryColumn( "passed", Colour::Success ) .addRow( totals.testCases.passed ) .addRow( totals.assertions.passed ) ); columns.push_back( SummaryColumn( "failed", Colour::ResultError ) .addRow( totals.testCases.failed ) .addRow( totals.assertions.failed ) ); columns.push_back( SummaryColumn( "failed as expected", Colour::ResultExpectedFailure ) .addRow( totals.testCases.failedButOk ) .addRow( totals.assertions.failedButOk ) ); printSummaryRow( "test cases", columns, 0 ); printSummaryRow( "assertions", columns, 1 ); } } void printSummaryRow( std::string const& label, std::vector const& cols, std::size_t row ) { for( std::vector::const_iterator it = cols.begin(); it != cols.end(); ++it ) { std::string value = it->rows[row]; if( it->label.empty() ) { stream << label << ": "; if( value != "0" ) stream << value; else stream << Colour( Colour::Warning ) << "- none -"; } else if( value != "0" ) { stream << Colour( Colour::LightGrey ) << " | "; stream << Colour( it->colour ) << value << ' ' << it->label; } } stream << '\n'; } static std::size_t makeRatio( 
std::size_t number, std::size_t total ) { std::size_t ratio = total > 0 ? CATCH_CONFIG_CONSOLE_WIDTH * number/ total : 0; return ( ratio == 0 && number > 0 ) ? 1 : ratio; } static std::size_t& findMax( std::size_t& i, std::size_t& j, std::size_t& k ) { if( i > j && i > k ) return i; else if( j > k ) return j; else return k; } void printTotalsDivider( Totals const& totals ) { if( totals.testCases.total() > 0 ) { std::size_t failedRatio = makeRatio( totals.testCases.failed, totals.testCases.total() ); std::size_t failedButOkRatio = makeRatio( totals.testCases.failedButOk, totals.testCases.total() ); std::size_t passedRatio = makeRatio( totals.testCases.passed, totals.testCases.total() ); while( failedRatio + failedButOkRatio + passedRatio < CATCH_CONFIG_CONSOLE_WIDTH-1 ) findMax( failedRatio, failedButOkRatio, passedRatio )++; while( failedRatio + failedButOkRatio + passedRatio > CATCH_CONFIG_CONSOLE_WIDTH-1 ) findMax( failedRatio, failedButOkRatio, passedRatio )--; stream << Colour( Colour::Error ) << std::string( failedRatio, '=' ); stream << Colour( Colour::ResultExpectedFailure ) << std::string( failedButOkRatio, '=' ); if( totals.testCases.allPassed() ) stream << Colour( Colour::ResultSuccess ) << std::string( passedRatio, '=' ); else stream << Colour( Colour::Success ) << std::string( passedRatio, '=' ); } else { stream << Colour( Colour::Warning ) << std::string( CATCH_CONFIG_CONSOLE_WIDTH-1, '=' ); } stream << '\n'; } void printSummaryDivider() { stream << getLineOfChars<'-'>() << '\n'; } private: bool m_headerPrinted; }; INTERNAL_CATCH_REGISTER_REPORTER( "console", ConsoleReporter ) } // end namespace Catch // #included from: ../reporters/catch_reporter_compact.hpp #define TWOBLUECUBES_CATCH_REPORTER_COMPACT_HPP_INCLUDED namespace Catch { struct CompactReporter : StreamingReporterBase { CompactReporter( ReporterConfig const& _config ) : StreamingReporterBase( _config ) {} virtual ~CompactReporter(); static std::string getDescription() { return "Reports test 
results on a single line, suitable for IDEs"; } virtual ReporterPreferences getPreferences() const { ReporterPreferences prefs; prefs.shouldRedirectStdOut = false; return prefs; } virtual void noMatchingTestCases( std::string const& spec ) { stream << "No test cases matched '" << spec << '\'' << std::endl; } virtual void assertionStarting( AssertionInfo const& ) {} virtual bool assertionEnded( AssertionStats const& _assertionStats ) { AssertionResult const& result = _assertionStats.assertionResult; bool printInfoMessages = true; // Drop out if result was successful and we're not printing those if( !m_config->includeSuccessfulResults() && result.isOk() ) { if( result.getResultType() != ResultWas::Warning ) return false; printInfoMessages = false; } AssertionPrinter printer( stream, _assertionStats, printInfoMessages ); printer.print(); stream << std::endl; return true; } virtual void sectionEnded(SectionStats const& _sectionStats) CATCH_OVERRIDE { if (m_config->showDurations() == ShowDurations::Always) { stream << getFormattedDuration(_sectionStats.durationInSeconds) << " s: " << _sectionStats.sectionInfo.name << std::endl; } } virtual void testRunEnded( TestRunStats const& _testRunStats ) { printTotals( _testRunStats.totals ); stream << '\n' << std::endl; StreamingReporterBase::testRunEnded( _testRunStats ); } private: class AssertionPrinter { void operator= ( AssertionPrinter const& ); public: AssertionPrinter( std::ostream& _stream, AssertionStats const& _stats, bool _printInfoMessages ) : stream( _stream ) , stats( _stats ) , result( _stats.assertionResult ) , messages( _stats.infoMessages ) , itMessage( _stats.infoMessages.begin() ) , printInfoMessages( _printInfoMessages ) {} void print() { printSourceInfo(); itMessage = messages.begin(); switch( result.getResultType() ) { case ResultWas::Ok: printResultType( Colour::ResultSuccess, passedString() ); printOriginalExpression(); printReconstructedExpression(); if ( ! 
result.hasExpression() ) printRemainingMessages( Colour::None ); else printRemainingMessages(); break; case ResultWas::ExpressionFailed: if( result.isOk() ) printResultType( Colour::ResultSuccess, failedString() + std::string( " - but was ok" ) ); else printResultType( Colour::Error, failedString() ); printOriginalExpression(); printReconstructedExpression(); printRemainingMessages(); break; case ResultWas::ThrewException: printResultType( Colour::Error, failedString() ); printIssue( "unexpected exception with message:" ); printMessage(); printExpressionWas(); printRemainingMessages(); break; case ResultWas::FatalErrorCondition: printResultType( Colour::Error, failedString() ); printIssue( "fatal error condition with message:" ); printMessage(); printExpressionWas(); printRemainingMessages(); break; case ResultWas::DidntThrowException: printResultType( Colour::Error, failedString() ); printIssue( "expected exception, got none" ); printExpressionWas(); printRemainingMessages(); break; case ResultWas::Info: printResultType( Colour::None, "info" ); printMessage(); printRemainingMessages(); break; case ResultWas::Warning: printResultType( Colour::None, "warning" ); printMessage(); printRemainingMessages(); break; case ResultWas::ExplicitFailure: printResultType( Colour::Error, failedString() ); printIssue( "explicitly" ); printRemainingMessages( Colour::None ); break; // These cases are here to prevent compiler warnings case ResultWas::Unknown: case ResultWas::FailureBit: case ResultWas::Exception: printResultType( Colour::Error, "** internal error **" ); break; } } private: // Colour::LightGrey static Colour::Code dimColour() { return Colour::FileName; } #ifdef CATCH_PLATFORM_MAC static const char* failedString() { return "FAILED"; } static const char* passedString() { return "PASSED"; } #else static const char* failedString() { return "failed"; } static const char* passedString() { return "passed"; } #endif void printSourceInfo() const { Colour colourGuard( 
Colour::FileName ); stream << result.getSourceInfo() << ':'; } void printResultType( Colour::Code colour, std::string const& passOrFail ) const { if( !passOrFail.empty() ) { { Colour colourGuard( colour ); stream << ' ' << passOrFail; } stream << ':'; } } void printIssue( std::string const& issue ) const { stream << ' ' << issue; } void printExpressionWas() { if( result.hasExpression() ) { stream << ';'; { Colour colour( dimColour() ); stream << " expression was:"; } printOriginalExpression(); } } void printOriginalExpression() const { if( result.hasExpression() ) { stream << ' ' << result.getExpression(); } } void printReconstructedExpression() const { if( result.hasExpandedExpression() ) { { Colour colour( dimColour() ); stream << " for: "; } stream << result.getExpandedExpression(); } } void printMessage() { if ( itMessage != messages.end() ) { stream << " '" << itMessage->message << '\''; ++itMessage; } } void printRemainingMessages( Colour::Code colour = dimColour() ) { if ( itMessage == messages.end() ) return; // using messages.end() directly yields compilation error: std::vector::const_iterator itEnd = messages.end(); const std::size_t N = static_cast( std::distance( itMessage, itEnd ) ); { Colour colourGuard( colour ); stream << " with " << pluralise( N, "message" ) << ':'; } for(; itMessage != itEnd; ) { // If this assertion is a warning ignore any INFO messages if( printInfoMessages || itMessage->type != ResultWas::Info ) { stream << " '" << itMessage->message << '\''; if ( ++itMessage != itEnd ) { Colour colourGuard( dimColour() ); stream << " and"; } } } } private: std::ostream& stream; AssertionStats const& stats; AssertionResult const& result; std::vector messages; std::vector::const_iterator itMessage; bool printInfoMessages; }; // Colour, message variants: // - white: No tests ran. // - red: Failed [both/all] N test cases, failed [both/all] M assertions. // - white: Passed [both/all] N test cases (no assertions). 
// - red: Failed N tests cases, failed M assertions. // - green: Passed [both/all] N tests cases with M assertions. std::string bothOrAll( std::size_t count ) const { return count == 1 ? std::string() : count == 2 ? "both " : "all " ; } void printTotals( const Totals& totals ) const { if( totals.testCases.total() == 0 ) { stream << "No tests ran."; } else if( totals.testCases.failed == totals.testCases.total() ) { Colour colour( Colour::ResultError ); const std::string qualify_assertions_failed = totals.assertions.failed == totals.assertions.total() ? bothOrAll( totals.assertions.failed ) : std::string(); stream << "Failed " << bothOrAll( totals.testCases.failed ) << pluralise( totals.testCases.failed, "test case" ) << ", " "failed " << qualify_assertions_failed << pluralise( totals.assertions.failed, "assertion" ) << '.'; } else if( totals.assertions.total() == 0 ) { stream << "Passed " << bothOrAll( totals.testCases.total() ) << pluralise( totals.testCases.total(), "test case" ) << " (no assertions)."; } else if( totals.assertions.failed ) { Colour colour( Colour::ResultError ); stream << "Failed " << pluralise( totals.testCases.failed, "test case" ) << ", " "failed " << pluralise( totals.assertions.failed, "assertion" ) << '.'; } else { Colour colour( Colour::ResultSuccess ); stream << "Passed " << bothOrAll( totals.testCases.passed ) << pluralise( totals.testCases.passed, "test case" ) << " with " << pluralise( totals.assertions.passed, "assertion" ) << '.'; } } }; INTERNAL_CATCH_REGISTER_REPORTER( "compact", CompactReporter ) } // end namespace Catch namespace Catch { // These are all here to avoid warnings about not having any out of line // virtual methods NonCopyable::~NonCopyable() {} IShared::~IShared() {} IStream::~IStream() CATCH_NOEXCEPT {} FileStream::~FileStream() CATCH_NOEXCEPT {} CoutStream::~CoutStream() CATCH_NOEXCEPT {} DebugOutStream::~DebugOutStream() CATCH_NOEXCEPT {} StreamBufBase::~StreamBufBase() CATCH_NOEXCEPT {} IContext::~IContext() {} 
IResultCapture::~IResultCapture() {} ITestCase::~ITestCase() {} ITestCaseRegistry::~ITestCaseRegistry() {} IRegistryHub::~IRegistryHub() {} IMutableRegistryHub::~IMutableRegistryHub() {} IExceptionTranslator::~IExceptionTranslator() {} IExceptionTranslatorRegistry::~IExceptionTranslatorRegistry() {} IReporter::~IReporter() {} IReporterFactory::~IReporterFactory() {} IReporterRegistry::~IReporterRegistry() {} IStreamingReporter::~IStreamingReporter() {} AssertionStats::~AssertionStats() {} SectionStats::~SectionStats() {} TestCaseStats::~TestCaseStats() {} TestGroupStats::~TestGroupStats() {} TestRunStats::~TestRunStats() {} CumulativeReporterBase::SectionNode::~SectionNode() {} CumulativeReporterBase::~CumulativeReporterBase() {} StreamingReporterBase::~StreamingReporterBase() {} ConsoleReporter::~ConsoleReporter() {} CompactReporter::~CompactReporter() {} IRunner::~IRunner() {} IMutableContext::~IMutableContext() {} IConfig::~IConfig() {} XmlReporter::~XmlReporter() {} JunitReporter::~JunitReporter() {} TestRegistry::~TestRegistry() {} FreeFunctionTestCase::~FreeFunctionTestCase() {} IGeneratorInfo::~IGeneratorInfo() {} IGeneratorsForTest::~IGeneratorsForTest() {} WildcardPattern::~WildcardPattern() {} TestSpec::Pattern::~Pattern() {} TestSpec::NamePattern::~NamePattern() {} TestSpec::TagPattern::~TagPattern() {} TestSpec::ExcludedPattern::~ExcludedPattern() {} Matchers::Impl::MatcherUntypedBase::~MatcherUntypedBase() {} void Config::dummy() {} namespace TestCaseTracking { ITracker::~ITracker() {} TrackerBase::~TrackerBase() {} SectionTracker::~SectionTracker() {} IndexTracker::~IndexTracker() {} } } #ifdef __clang__ # pragma clang diagnostic pop #endif #endif #ifdef CATCH_CONFIG_MAIN // #included from: internal/catch_default_main.hpp #define TWOBLUECUBES_CATCH_DEFAULT_MAIN_HPP_INCLUDED #ifndef __OBJC__ #if defined(WIN32) && defined(_UNICODE) && !defined(DO_NOT_USE_WMAIN) // Standard C/C++ Win32 Unicode wmain entry point extern "C" int wmain (int argc, wchar_t * 
argv[], wchar_t * []) { #else // Standard C/C++ main entry point int main (int argc, char * argv[]) { #endif int result = Catch::Session().run( argc, argv ); return ( result < 0xff ? result : 0xff ); } #else // __OBJC__ // Objective-C entry point int main (int argc, char * const argv[]) { #if !CATCH_ARC_ENABLED NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init]; #endif Catch::registerTestMethods(); int result = Catch::Session().run( argc, (char* const*)argv ); #if !CATCH_ARC_ENABLED [pool drain]; #endif return ( result < 0xff ? result : 0xff ); } #endif // __OBJC__ #endif #ifdef CLARA_CONFIG_MAIN_NOT_DEFINED # undef CLARA_CONFIG_MAIN #endif ////// // If this config identifier is defined then all CATCH macros are prefixed with CATCH_ #ifdef CATCH_CONFIG_PREFIX_ALL #if defined(CATCH_CONFIG_FAST_COMPILE) #define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr ) #define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) #else #define CATCH_REQUIRE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE", Catch::ResultDisposition::Normal, expr ) #define CATCH_REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) #endif #define CATCH_REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr ) #define CATCH_REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_REQUIRE_THROWS_AS", exceptionType, Catch::ResultDisposition::Normal, expr ) #define CATCH_REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr ) #define CATCH_REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr ) #define CATCH_CHECK( expr ) 
INTERNAL_CATCH_TEST( "CATCH_CHECK", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_FALSE( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr ) #define CATCH_CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CATCH_CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CATCH_CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CATCH_CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr ) #define CATCH_CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr ) #define CATCH_CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CATCH_CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CATCH_CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr ) #define CATCH_CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CATCH_CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CATCH_CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg ) #if defined(CATCH_CONFIG_FAST_COMPILE) #define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) #else #define CATCH_REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CATCH_REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) #endif #define CATCH_INFO( msg ) INTERNAL_CATCH_INFO( "CATCH_INFO", msg ) #define CATCH_WARN( msg ) INTERNAL_CATCH_MSG( "CATCH_WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg ) #define CATCH_SCOPED_INFO( msg ) 
INTERNAL_CATCH_INFO( "CATCH_INFO", msg ) #define CATCH_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := " << Catch::toString(msg) ) #define CATCH_SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CATCH_CAPTURE", #msg " := " << Catch::toString(msg) ) #ifdef CATCH_CONFIG_VARIADIC_MACROS #define CATCH_TEST_CASE( ... ) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ ) #define CATCH_TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ ) #define CATCH_METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ ) #define CATCH_REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ ) #define CATCH_SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ ) #define CATCH_FAIL( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ ) #define CATCH_FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) #define CATCH_SUCCEED( ... 
) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) #else #define CATCH_TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description ) #define CATCH_TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description ) #define CATCH_METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description ) #define CATCH_REGISTER_TEST_CASE( function, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( function, name, description ) #define CATCH_SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description ) #define CATCH_FAIL( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg ) #define CATCH_FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "CATCH_FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg ) #define CATCH_SUCCEED( msg ) INTERNAL_CATCH_MSG( "CATCH_SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg ) #endif #define CATCH_ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" ) #define CATCH_REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) #define CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) #define CATCH_GENERATE( expr) INTERNAL_CATCH_GENERATE( expr ) // "BDD-style" convenience wrappers #ifdef CATCH_CONFIG_VARIADIC_MACROS #define CATCH_SCENARIO( ... ) CATCH_TEST_CASE( "Scenario: " __VA_ARGS__ ) #define CATCH_SCENARIO_METHOD( className, ... 
) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ ) #else #define CATCH_SCENARIO( name, tags ) CATCH_TEST_CASE( "Scenario: " name, tags ) #define CATCH_SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags ) #endif #define CATCH_GIVEN( desc ) CATCH_SECTION( std::string( "Given: ") + desc, "" ) #define CATCH_WHEN( desc ) CATCH_SECTION( std::string( " When: ") + desc, "" ) #define CATCH_AND_WHEN( desc ) CATCH_SECTION( std::string( " And: ") + desc, "" ) #define CATCH_THEN( desc ) CATCH_SECTION( std::string( " Then: ") + desc, "" ) #define CATCH_AND_THEN( desc ) CATCH_SECTION( std::string( " And: ") + desc, "" ) // If CATCH_CONFIG_PREFIX_ALL is not defined then the CATCH_ prefix is not required #else #if defined(CATCH_CONFIG_FAST_COMPILE) #define REQUIRE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE", Catch::ResultDisposition::Normal, expr ) #define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST_NO_TRY( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) #else #define REQUIRE( expr ) INTERNAL_CATCH_TEST( "REQUIRE", Catch::ResultDisposition::Normal, expr ) #define REQUIRE_FALSE( expr ) INTERNAL_CATCH_TEST( "REQUIRE_FALSE", Catch::ResultDisposition::Normal | Catch::ResultDisposition::FalseTest, expr ) #endif #define REQUIRE_THROWS( expr ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS", Catch::ResultDisposition::Normal, "", expr ) #define REQUIRE_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "REQUIRE_THROWS_AS", exceptionType, Catch::ResultDisposition::Normal, expr ) #define REQUIRE_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "REQUIRE_THROWS_WITH", Catch::ResultDisposition::Normal, matcher, expr ) #define REQUIRE_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "REQUIRE_NOTHROW", Catch::ResultDisposition::Normal, expr ) #define CHECK( expr ) INTERNAL_CATCH_TEST( "CHECK", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CHECK_FALSE( expr ) 
INTERNAL_CATCH_TEST( "CHECK_FALSE", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::FalseTest, expr ) #define CHECKED_IF( expr ) INTERNAL_CATCH_IF( "CHECKED_IF", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CHECKED_ELSE( expr ) INTERNAL_CATCH_ELSE( "CHECKED_ELSE", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CHECK_NOFAIL( expr ) INTERNAL_CATCH_TEST( "CHECK_NOFAIL", Catch::ResultDisposition::ContinueOnFailure | Catch::ResultDisposition::SuppressFail, expr ) #define CHECK_THROWS( expr ) INTERNAL_CATCH_THROWS( "CHECK_THROWS", Catch::ResultDisposition::ContinueOnFailure, "", expr ) #define CHECK_THROWS_AS( expr, exceptionType ) INTERNAL_CATCH_THROWS_AS( "CHECK_THROWS_AS", exceptionType, Catch::ResultDisposition::ContinueOnFailure, expr ) #define CHECK_THROWS_WITH( expr, matcher ) INTERNAL_CATCH_THROWS( "CHECK_THROWS_WITH", Catch::ResultDisposition::ContinueOnFailure, matcher, expr ) #define CHECK_NOTHROW( expr ) INTERNAL_CATCH_NO_THROW( "CHECK_NOTHROW", Catch::ResultDisposition::ContinueOnFailure, expr ) #define CHECK_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "CHECK_THAT", matcher, Catch::ResultDisposition::ContinueOnFailure, arg ) #if defined(CATCH_CONFIG_FAST_COMPILE) #define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT_NO_TRY( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) #else #define REQUIRE_THAT( arg, matcher ) INTERNAL_CHECK_THAT( "REQUIRE_THAT", matcher, Catch::ResultDisposition::Normal, arg ) #endif #define INFO( msg ) INTERNAL_CATCH_INFO( "INFO", msg ) #define WARN( msg ) INTERNAL_CATCH_MSG( "WARN", Catch::ResultWas::Warning, Catch::ResultDisposition::ContinueOnFailure, msg ) #define SCOPED_INFO( msg ) INTERNAL_CATCH_INFO( "INFO", msg ) #define CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) ) #define SCOPED_CAPTURE( msg ) INTERNAL_CATCH_INFO( "CAPTURE", #msg " := " << Catch::toString(msg) ) #ifdef CATCH_CONFIG_VARIADIC_MACROS #define TEST_CASE( ... 
) INTERNAL_CATCH_TESTCASE( __VA_ARGS__ ) #define TEST_CASE_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, __VA_ARGS__ ) #define METHOD_AS_TEST_CASE( method, ... ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, __VA_ARGS__ ) #define REGISTER_TEST_CASE( Function, ... ) INTERNAL_CATCH_REGISTER_TESTCASE( Function, __VA_ARGS__ ) #define SECTION( ... ) INTERNAL_CATCH_SECTION( __VA_ARGS__ ) #define FAIL( ... ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, __VA_ARGS__ ) #define FAIL_CHECK( ... ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) #define SUCCEED( ... ) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, __VA_ARGS__ ) #else #define TEST_CASE( name, description ) INTERNAL_CATCH_TESTCASE( name, description ) #define TEST_CASE_METHOD( className, name, description ) INTERNAL_CATCH_TEST_CASE_METHOD( className, name, description ) #define METHOD_AS_TEST_CASE( method, name, description ) INTERNAL_CATCH_METHOD_AS_TEST_CASE( method, name, description ) #define REGISTER_TEST_CASE( method, name, description ) INTERNAL_CATCH_REGISTER_TESTCASE( method, name, description ) #define SECTION( name, description ) INTERNAL_CATCH_SECTION( name, description ) #define FAIL( msg ) INTERNAL_CATCH_MSG( "FAIL", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::Normal, msg ) #define FAIL_CHECK( msg ) INTERNAL_CATCH_MSG( "FAIL_CHECK", Catch::ResultWas::ExplicitFailure, Catch::ResultDisposition::ContinueOnFailure, msg ) #define SUCCEED( msg ) INTERNAL_CATCH_MSG( "SUCCEED", Catch::ResultWas::Ok, Catch::ResultDisposition::ContinueOnFailure, msg ) #endif #define ANON_TEST_CASE() INTERNAL_CATCH_TESTCASE( "", "" ) #define REGISTER_REPORTER( name, reporterType ) INTERNAL_CATCH_REGISTER_REPORTER( name, reporterType ) #define REGISTER_LEGACY_REPORTER( name, reporterType ) 
INTERNAL_CATCH_REGISTER_LEGACY_REPORTER( name, reporterType ) #define GENERATE( expr) INTERNAL_CATCH_GENERATE( expr ) #endif #define CATCH_TRANSLATE_EXCEPTION( signature ) INTERNAL_CATCH_TRANSLATE_EXCEPTION( signature ) // "BDD-style" convenience wrappers #ifdef CATCH_CONFIG_VARIADIC_MACROS #define SCENARIO( ... ) TEST_CASE( "Scenario: " __VA_ARGS__ ) #define SCENARIO_METHOD( className, ... ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " __VA_ARGS__ ) #else #define SCENARIO( name, tags ) TEST_CASE( "Scenario: " name, tags ) #define SCENARIO_METHOD( className, name, tags ) INTERNAL_CATCH_TEST_CASE_METHOD( className, "Scenario: " name, tags ) #endif #define GIVEN( desc ) SECTION( std::string(" Given: ") + desc, "" ) #define WHEN( desc ) SECTION( std::string(" When: ") + desc, "" ) #define AND_WHEN( desc ) SECTION( std::string("And when: ") + desc, "" ) #define THEN( desc ) SECTION( std::string(" Then: ") + desc, "" ) #define AND_THEN( desc ) SECTION( std::string(" And: ") + desc, "" ) using Catch::Detail::Approx; // #included from: internal/catch_reenable_warnings.h #define TWOBLUECUBES_CATCH_REENABLE_WARNINGS_H_INCLUDED #ifdef __clang__ # ifdef __ICC // icpc defines the __clang__ macro # pragma warning(pop) # else # pragma clang diagnostic pop # endif #elif defined __GNUC__ # pragma GCC diagnostic pop #endif #endif // TWOBLUECUBES_SINGLE_INCLUDE_CATCH_HPP_INCLUDED testthat/inst/include/testthat/testthat.h0000644000176200001440000001177415047626322020404 0ustar liggesusers#ifndef TESTTHAT_HPP #define TESTTHAT_HPP #define TESTTHAT_TOKEN_PASTE_IMPL(__X__, __Y__) __X__ ## __Y__ #define TESTTHAT_TOKEN_PASTE(__X__, __Y__) TESTTHAT_TOKEN_PASTE_IMPL(__X__, __Y__) #define TESTTHAT_DISABLED_FUNCTION \ static void TESTTHAT_TOKEN_PASTE(testthat_disabled_test_, __LINE__) () /** * Conditionally enable or disable 'testthat' + 'Catch'. * Force 'testthat' to be enabled by defining TESTTHAT_ENABLED. * Force 'testthat' to be disabled by defining TESTTHAT_DISABLED. 
* TESTTHAT_DISABLED takes precedence. * 'testthat' is disabled on Solaris by default. */ #if defined(__GNUC__) || defined(__clang__) # define TESTTHAT_ENABLED #endif #if defined(__SUNPRO_C) || defined(__SUNPRO_CC) || defined(__sun) || defined(__SVR4) # define TESTTHAT_DISABLED #endif #ifndef TESTTHAT_ENABLED # define TESTTHAT_DISABLED #endif /* * Hide symbols containing static members on gcc, to work around issues * with DLL unload due to static members in inline functions. This seems to only * affect Linux. We never define this attribute on Windows, as MinGW has a known * issue with this visibility attribute and ignores it with a warning. * https://github.com/r-lib/devtools/issues/1832 * https://github.com/r-lib/testthat/issues/1672 */ #if (defined(__GNUC__) && !defined(__MINGW32__)) || defined(__clang__) # define TESTTHAT_ATTRIBUTE_HIDDEN __attribute__ ((visibility("hidden"))) #else # define TESTTHAT_ATTRIBUTE_HIDDEN #endif #ifndef TESTTHAT_DISABLED # define CATCH_CONFIG_PREFIX_ALL # define CATCH_CONFIG_NOSTDOUT # ifdef TESTTHAT_TEST_RUNNER # define CATCH_CONFIG_RUNNER # endif # include // CHAR_MAX # include // EOF # ifdef __GNUC__ # pragma GCC diagnostic ignored "-Wparentheses" # endif namespace Catch { // Avoid 'R CMD check' warnings related to the use of 'std::rand()' and // 'std::srand()'. Since we don't call any Catch APIs that use these // functions, it suffices to just override them in the Catch namespace. inline void srand(unsigned) {} inline int rand() { return 42; } // Catch has calls to 'exit' on failure, which upsets R CMD check. // We won't bump into them during normal test execution so just override // it in the Catch namespace before we include 'catch'. inline void exit(int) throw() {} } # include "vendor/catch.h" // Implement an output stream that avoids writing to stdout / stderr. 
extern "C" void Rprintf(const char*, ...); extern "C" void R_FlushConsole(); namespace testthat { class r_streambuf : public std::streambuf { public: r_streambuf() {} protected: virtual std::streamsize xsputn(const char* s, std::streamsize n) { if (n == 1) Rprintf("%c", *s); else Rprintf("%.*s", static_cast(n), s); return n; } virtual int overflow(int c = EOF) { if (c == EOF) return c; if (c > CHAR_MAX) return c; Rprintf("%c", (char) c); return c; } virtual int sync() { R_FlushConsole(); return 0; } }; class r_ostream : public std::ostream { public: r_ostream() : std::ostream(new r_streambuf) {} ~r_ostream() { delete rdbuf(); } }; // Allow client packages to access the Catch::Session // exported by testthat. # ifdef CATCH_CONFIG_RUNNER TESTTHAT_ATTRIBUTE_HIDDEN inline Catch::Session& catchSession() { static Catch::Session instance; return instance; } inline bool run_tests(bool use_xml) { if (use_xml) { const char* argv[] = {"catch", "-r", "xml"}; return catchSession().run(3, argv) == 0; } else { return catchSession().run() == 0; } } # endif // CATCH_CONFIG_RUNNER } // namespace testthat namespace Catch { TESTTHAT_ATTRIBUTE_HIDDEN inline std::ostream& cout() { static testthat::r_ostream instance; return instance; } TESTTHAT_ATTRIBUTE_HIDDEN inline std::ostream& cerr() { static testthat::r_ostream instance; return instance; } } // namespace Catch # ifdef TESTTHAT_TEST_RUNNER // ERROR will be redefined by R; avoid compiler warnings # ifdef ERROR # undef ERROR # endif # include # include extern "C" SEXP run_testthat_tests(SEXP use_xml_sxp) { bool use_xml = LOGICAL(use_xml_sxp)[0]; bool success = testthat::run_tests(use_xml); return Rf_ScalarLogical(success); } # endif // TESTTHAT_TEST_RUNNER # define context(__X__) CATCH_TEST_CASE(__X__ " | " __FILE__) # define test_that CATCH_SECTION # define expect_true CATCH_CHECK # define expect_false CATCH_CHECK_FALSE # define expect_error CATCH_CHECK_THROWS # define expect_error_as CATCH_CHECK_THROWS_AS #else // TESTTHAT_DISABLED 
# define context(__X__) TESTTHAT_DISABLED_FUNCTION # define test_that(__X__) if (false) # define expect_true(__X__) (void) (__X__) # define expect_false(__X__) (void) (__X__) # define expect_error(__X__) (void) (__X__) # define expect_error_as(__X__, __Y__) (void) (__X__) # ifdef TESTTHAT_TEST_RUNNER # include # include extern "C" SEXP run_testthat_tests() { return Rf_ScalarLogical(true); } # endif // TESTTHAT_TEST_RUNNER #endif // TESTTHAT_DISABLED #endif /* TESTTHAT_HPP */ testthat/inst/CITATION0000644000176200001440000000036115104404205014217 0ustar liggesusersbibentry( "Article", author = "Hadley Wickham", title = "testthat: Get Started with Testing", journal = "The R Journal", year = 2011, volume = 3, pages = "5--10", url = "https://journal.r-project.org/articles/RJ-2011-002/" ) testthat/inst/resources/0000755000176200001440000000000014164710002015074 5ustar liggesuserstestthat/inst/resources/test-example.cpp0000644000176200001440000000216012661230133020211 0ustar liggesusers/* * This file uses the Catch unit testing library, alongside * testthat's simple bindings, to test a C++ function. * * For your own packages, ensure that your test files are * placed within the `src/` folder, and that you include * `LinkingTo: testthat` within your DESCRIPTION file. */ // All test files should include the // header file. #include // Normally this would be a function from your package's // compiled library -- you might instead just include a header // file providing the definition, and let R CMD INSTALL // handle building and linking. int twoPlusTwo() { return 2 + 2; } // Initialize a unit test context. This is similar to how you // might begin an R test file with 'context()', expect the // associated context should be wrapped in braced. context("Sample unit tests") { // The format for specifying tests is similar to that of // testthat's R functions. Use 'test_that()' to define a // unit test, and use 'expect_true()' and 'expect_false()' // to test the desired conditions. 
test_that("two plus two equals four") { expect_true(twoPlusTwo() == 4); } } testthat/inst/resources/test-runner.cpp0000644000176200001440000000036712661230133020076 0ustar liggesusers/* * Please do not edit this file -- it ensures that your package will export a * 'run_testthat_tests()' C routine that can be used to run the Catch unit tests * available in your package. */ #define TESTTHAT_TEST_RUNNER #include testthat/inst/resources/test-cpp.R0000644000176200001440000000002414164710002016752 0ustar liggesusersrun_cpp_tests("%s") testthat/inst/resources/catch-routine-registration.R0000644000176200001440000000043514164710002022476 0ustar liggesusers# This dummy function definition is included with the package to ensure that # 'tools::package_native_routine_registration_skeleton()' generates the required # registration info for the 'run_testthat_tests' symbol. (function() { .Call("run_testthat_tests", FALSE, PACKAGE = "%s") }) testthat/inst/examples/0000755000176200001440000000000015130237654014713 5ustar liggesuserstestthat/inst/examples/test-success.R0000644000176200001440000000051414164710002017450 0ustar liggesuserstest_that("one plus one is two", { expect_equal(1 + 1, 2) }) test_that("you can skip tests if needed", { skip("This test hasn't been written yet") }) test_that("some tests have warnings", { expect_equal(log(-1), NaN) }) test_that("some more successes just to pad things out", { expect_true(TRUE) expect_false(FALSE) }) testthat/inst/examples/test-failure.R0000644000176200001440000000025014312145107017427 0ustar liggesusersplus <- function(x, y) 1 + 1 test_that("one plus one is two", { expect_equal(plus(1, 1), 2) }) test_that("two plus two is four", { expect_equal(plus(2, 2), 4) }) testthat/inst/doc/0000755000176200001440000000000015130237654013642 5ustar liggesuserstestthat/inst/doc/custom-expectation.R0000644000176200001440000001227315130237643017623 0ustar liggesusers## ----setup-------------------------------------------------------------------- 
library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") ## ----------------------------------------------------------------------------- expect_df <- function(tbl) { expect_s3_class(tbl, "data.frame") } ## ----------------------------------------------------------------------------- # from tidytext expect_nrow <- function(tbl, n) { expect_s3_class(tbl, "data.frame") expect_equal(nrow(tbl), n) } ## ----------------------------------------------------------------------------- try({ test_that("success", { expect_nrow(mtcars, 32) }) test_that("failure 1", { expect_nrow(mtcars, 30) }) test_that("failure 2", { expect_nrow(matrix(1:5), 2) }) }) ## ----------------------------------------------------------------------------- expect_length <- function(object, n) { # 1. Capture object and label act <- quasi_label(rlang::enquo(object)) act_n <- length(act$val) if (act_n != n) { # 2. Fail if expectations are violated fail(c( sprintf("Expected %s to have length %i.", act$lab, n), sprintf("Actual length: %i.", act_n) )) } else { # 3. Pass if expectations are met pass() } # 4. 
Invisibly return the input value invisible(act$val) } ## ----------------------------------------------------------------------------- test_that("mtcars is a 13 row data frame", { mtcars |> expect_type("list") |> expect_s3_class("data.frame") |> expect_length(11) }) ## ----------------------------------------------------------------------------- test_that("expect_length works as expected", { x <- 1:10 expect_success(expect_length(x, 10)) expect_failure(expect_length(x, 11)) }) test_that("expect_length gives useful feedback", { x <- 1:10 expect_snapshot_failure(expect_length(x, 11)) }) ## ----------------------------------------------------------------------------- expect_length(mean, 1) ## ----------------------------------------------------------------------------- expect_vector_length <- function(object, n) { act <- quasi_label(rlang::enquo(object)) # It's non-trivial to check if an object is a vector in base R so we # use an rlang helper if (!rlang::is_vector(act$val)) { fail(c( sprintf("Expected %s to be a vector", act$lab), sprintf("Actual type: %s", typeof(act$val)) )) } else { act_n <- length(act$val) if (act_n != n) { fail(c( sprintf("Expected %s to have length %i.", act$lab, n), sprintf("Actual length: %i.", act_n) )) } else { pass() } } invisible(act$val) } ## ----------------------------------------------------------------------------- try({ expect_vector_length(mean, 1) expect_vector_length(mtcars, 15) }) ## ----------------------------------------------------------------------------- expect_s3_class <- function(object, class) { if (!rlang::is_string(class)) { rlang::abort("`class` must be a string.") } act <- quasi_label(rlang::enquo(object)) if (!is.object(act$val)) { fail(sprintf("Expected %s to be an object.", act$lab)) } else if (isS4(act$val)) { fail(c( sprintf("Expected %s to be an S3 object.", act$lab), "Actual OO type: S4" )) } else if (!inherits(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, class), 
sprintf("Actual class: %s", class(act$val)) )) } else { pass() } invisible(act$val) } ## ----------------------------------------------------------------------------- try({ x1 <- 1:10 TestClass <- methods::setClass("Test", contains = "integer") x2 <- TestClass() x3 <- factor() expect_s3_class(x1, "integer") expect_s3_class(x2, "integer") expect_s3_class(x3, "integer") expect_s3_class(x3, "factor") }) ## ----------------------------------------------------------------------------- try({ expect_s3_class(x1, 1) }) ## ----------------------------------------------------------------------------- expect_s3_object <- function(object, class = NULL) { if (!rlang::is_string(class) && is.null(class)) { rlang::abort("`class` must be a string or NULL.") } act <- quasi_label(rlang::enquo(object)) if (!is.object(act$val)) { fail(sprintf("Expected %s to be an object.", act$lab)) } else if (isS4(act$val)) { fail(c( sprintf("Expected %s to be an S3 object.", act$lab), "Actual OO type: S4" )) } else if (!is.null(class) && !inherits(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, class), sprintf("Actual class: %s", class(act$val)) )) } else { pass() } invisible(act$val) } ## ----------------------------------------------------------------------------- expect_length_ <- function(act, n, trace_env = caller_env()) { act_n <- length(act$val) if (act_n != n) { fail( sprintf("%s has length %i, not length %i.", act$lab, act_n, n), trace_env = trace_env ) } else { pass() } } expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object)) expect_length_(act, n) invisible(act$val) } testthat/inst/doc/custom-expectation.html0000644000176200001440000013341015130237643020363 0ustar liggesusers Custom expectations

    Custom expectations

    This vignette shows you how to write your own expectations. Custom expectations allow you to extend testthat to meet your own specialized testing needs, creating new expect_* functions that work exactly the same way as the built-ins. Custom expectations are particularly useful if you want to produce expectations tailored for domain-specific data structures, combine multiple checks into a single expectation, or create more actionable feedback when an expectation fails. You can use them within your package by putting them in a helper file, or share them with others by exporting them from your package.

    In this vignette, you’ll learn about the four-part structure of expectations, how to test your custom expectations, see a few examples, and, if you’re writing a lot of expectations, learn how to reduce repeated code.

    Do you need it?

    But before you read the rest of the vignette and dive into the full details of creating a 100% correct expectation, consider if you can get away with a simpler wrapper. If you’re just customising an existing expectation by changing some defaults, you’re fine:

    expect_df <- function(tbl) {
      expect_s3_class(tbl, "data.frame")
    }

    If you’re combining multiple expectations, you can introduce a subtle problem. For example, take this expectation from tidytext:

    # from tidytext
    expect_nrow <- function(tbl, n) {
      expect_s3_class(tbl, "data.frame")
      expect_equal(nrow(tbl), n)
    }

    If we use it in a test you can see there’s an issue:

    test_that("success", {
      expect_nrow(mtcars, 32)
    })
    #> Test passed with 2 successes 🎉.
    
    test_that("failure 1", {
      expect_nrow(mtcars, 30)
    })
    #> ── Failure: failure 1 ──────────────────────────────────────────────────────────
    #> Expected `nrow(tbl)` to equal `n`.
    #> Differences:
    #>   `actual`: 32.0
    #> `expected`: 30.0
    #> 
    #> Backtrace:
    #>     ▆
    #>  1. └─global expect_nrow(mtcars, 30)
    #>  2.   └─testthat::expect_equal(nrow(tbl), n)
    #> Error:
    #> ! Test failed with 1 failure and 1 success.
    
    test_that("failure 2", {
      expect_nrow(matrix(1:5), 2)
    })
    #> ── Failure: failure 2 ──────────────────────────────────────────────────────────
    #> Expected `tbl` to be an S3 object.
    #> Actual OO type: none.
    #> Backtrace:
    #>     ▆
    #>  1. └─global expect_nrow(matrix(1:5), 2)
    #>  2.   └─testthat::expect_s3_class(tbl, "data.frame")
    #> ── Failure: failure 2 ──────────────────────────────────────────────────────────
    #> Expected `nrow(tbl)` to equal `n`.
    #> Differences:
    #>   `actual`: 5.0
    #> `expected`: 2.0
    #> 
    #> Backtrace:
    #>     ▆
    #>  1. └─global expect_nrow(matrix(1:5), 2)
    #>  2.   └─testthat::expect_equal(nrow(tbl), n)
    #> Error:
    #> ! Test failed with 2 failures and 0 successes.

    Each of these tests contain a single expectation, but they report a total of two successes and failures. It would be confusing if testthat didn’t report these numbers correctly. But as a helper in your package, it’s probably not a big deal.

    You might also notice that these failures generate a backtrace whereas built-in expectations don’t. Again, it’s not a big deal because the backtrace is correct, it’s just not needed.

    These are both minor issues, so if they don’t bother you, you can save yourself some pain by not reading this vignette 😀.

    Expectation basics

    An expectation has four main parts, as illustrated by expect_length():

    expect_length <- function(object, n) {  
      # 1. Capture object and label
      act <- quasi_label(rlang::enquo(object))
      
      act_n <- length(act$val)
      if (act_n != n) {
        # 2. Fail if expectations are violated
        fail(c(
          sprintf("Expected %s to have length %i.", act$lab, n),
          sprintf("Actual length: %i.", act_n)
        ))
      } else {
        # 3. Pass if expectations are met
        pass()
      }
      
      # 4. Invisibly return the input value
      invisible(act$val)
    }

    The first step in any expectation is to use quasi_label() to capture a “labeled value”, i.e., a list that contains both the value ($val) for testing and a label ($lab) used to make failure messages as informative as possible. This is a pattern that exists for fairly esoteric reasons; you don’t need to understand it, just copy and paste it.

    Next you need to check each way that object could violate the expectation. In this case, there’s only one check, but in more complicated cases there can be many. Note the specific form of the failure message: the first element describes what we expected, and then the second line reports what we actually saw.

    If the object is as expected, call pass(). This ensures that a success will be registered in the test reporter.

    Otherwise, call fail(). This ensures that a failure will be registered in the test reporter. NB: unlike stop() or abort(), fail() signals a failure but allows code execution to continue, ensuring that one failure does not prevent subsequent expectations from running.

    Finally, return the input value (act$val) invisibly. This is good practice because expectations are called primarily for their side-effects (triggering a failure), and returning the value allows expectations to be piped together:

    test_that("mtcars is a 13 row data frame", {
      mtcars |>
        expect_type("list") |>
        expect_s3_class("data.frame") |> 
        expect_length(11)
    })
    #> Test passed with 3 successes 🌈.

    Testing your expectations

    Once you’ve written your expectation, you need to test it: expectations are functions that can have bugs, just like any other function, and it’s really important that they generate actionable failure messages. Luckily testthat comes with three expectations designed specifically to test expectations:

    • expect_success() checks that your expectation emits exactly one success and zero failures.
    • expect_failure() checks that your expectation emits exactly one failure and zero successes.
    • expect_snapshot_failure() captures the failure message in a snapshot, making it easier to review whether it’s useful.

    The first two expectations are particularly important because they ensure that your expectation always reports either a single success or a single failure. If it doesn’t, the end user is going to get confusing results in their test suite reports.

    test_that("expect_length works as expected", {
      x <- 1:10
      expect_success(expect_length(x, 10))
      expect_failure(expect_length(x, 11))
    })
    #> Test passed with 2 successes 🥳.
    
    test_that("expect_length gives useful feedback", {
      x <- 1:10
      expect_snapshot_failure(expect_length(x, 11))
    })
    #> ── Warning: expect_length gives useful feedback ────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   expect_length(x, 11)
    #> Condition
    #>   Error:
    #>   ! Expected `x` to have length 11.
    #>   Actual length: 10.
    #> Test passed with 1 success 🎊.

    Examples

    The following sections show you a few more variations, loosely based on existing testthat expectations. These expectations were picked to show how you can generate actionable failures in slightly more complex situations.

    expect_vector_length()

    Let’s make expect_length() a bit more strict by also checking that the input is a vector. R is a bit unusual in that it gives a length to pretty much every object, and you can imagine not wanting code like the following to succeed, because it’s likely that the user passed the wrong object to the test.

    expect_length(mean, 1)

    To do this we’ll add an extra check that the input is either an atomic vector or a list:

    expect_vector_length <- function(object, n) {  
      act <- quasi_label(rlang::enquo(object))
    
      # It's non-trivial to check if an object is a vector in base R so we
      # use an rlang helper
      if (!rlang::is_vector(act$val)) {
        fail(c(
          sprintf("Expected %s to be a vector", act$lab),
          sprintf("Actual type: %s", typeof(act$val))
        ))
      } else {
        act_n <- length(act$val)
        if (act_n != n) {
          fail(c(
            sprintf("Expected %s to have length %i.", act$lab, n),
            sprintf("Actual length: %i.", act_n)
          ))
        } else {
          pass()
        }
      }
    
      invisible(act$val)
    }
    expect_vector_length(mean, 1)
    #> Error: Expected `mean` to be a vector
    #> Actual type: closure
    expect_vector_length(mtcars, 15)
    #> Error: Expected `mtcars` to have length 15.
    #> Actual length: 11.

    expect_s3_class()

    Or imagine you’re checking to see if an object inherits from an S3 class. R has a lot of different OO systems, and you want your failure messages to be as informative as possible, so before checking that the class matches, you probably want to check that the object is from the correct OO family.

    expect_s3_class <- function(object, class) {
      if (!rlang::is_string(class)) {
        rlang::abort("`class` must be a string.")
      }
    
      act <- quasi_label(rlang::enquo(object))
    
      if (!is.object(act$val)) {
        fail(sprintf("Expected %s to be an object.", act$lab))
      } else if (isS4(act$val)) {
        fail(c(
          sprintf("Expected %s to be an S3 object.", act$lab),
          "Actual OO type: S4"
        ))
      } else if (!inherits(act$val, class)) {
        fail(c(
          sprintf("Expected %s to inherit from %s.", act$lab, class),
          sprintf("Actual class: %s", class(act$val))
        ))
      } else {
        pass()
      }
    
      invisible(act$val)
    }
    x1 <- 1:10
    TestClass <- methods::setClass("Test", contains = "integer")
    x2 <- TestClass()
    x3 <- factor()
    
    expect_s3_class(x1, "integer")
    #> Error: Expected `x1` to be an object.
    expect_s3_class(x2, "integer")
    #> Error: Expected `x2` to be an S3 object.
    #> Actual OO type: S4
    expect_s3_class(x3, "integer")
    #> Error: Expected `x3` to inherit from integer.
    #> Actual class: factor
    expect_s3_class(x3, "factor")

    Note the variety of error messages. We always print what was expected, and where possible, also display what was actually received:

    • When object isn’t an object, we can only say what we expected.
    • When object is an S4 object, we can report that.
    • When inherits() is FALSE, we provide the actual class, since that’s most informative.

    The general principle is to tailor error messages to what the user can act on based on what you know about the input.

    Also note that I check that the class argument is a string. If it’s not a string, I throw an error. This is not a test failure; the user is calling the function incorrectly. In general, you should check the type of all arguments that affect the operation and error if they’re not what you expect.

    expect_s3_class(x1, 1)
    #> Error in `expect_s3_class()`:
    #> ! `class` must be a string.

    Optional class

    A common pattern in testthat’s own expectations is to use arguments to control the level of detail in the test. Here it would be nice if we could check that an object is an S3 object without checking for a specific class. I think we could do that by renaming expect_s3_class() to expect_s3_object(). Now expect_s3_object(x) would verify that x is an S3 object, and expect_s3_object(x, class = "foo") would verify that x is an S3 object with the given class. The implementation of this is straightforward: we also allow class to be NULL and then only verify inheritance when non-NULL.

    expect_s3_object <- function(object, class = NULL) {
      # `class` is optional: NULL means "any S3 object"; otherwise it must be a
      # single string. The check must reject values that are *neither* a string
      # *nor* NULL (using `&& is.null(class)` here would wrongly abort on NULL).
      if (!rlang::is_string(class) && !is.null(class)) {
        rlang::abort("`class` must be a string or NULL.")
      }
    
      act <- quasi_label(rlang::enquo(object))
    
      if (!is.object(act$val)) {
        fail(sprintf("Expected %s to be an object.", act$lab))
      } else if (isS4(act$val)) {
        fail(c(
          sprintf("Expected %s to be an S3 object.", act$lab),
          "Actual OO type: S4"
        ))
      } else if (!is.null(class) && !inherits(act$val, class)) {
        # Only check inheritance when a specific class was requested.
        fail(c(
          sprintf("Expected %s to inherit from %s.", act$lab, class),
          sprintf("Actual class: %s", class(act$val))
        ))
      } else {
        pass()
      }
    
      invisible(act$val)
    }

    Repeated code

    As you write more expectations, you might discover repeated code that you want to extract into a helper. Unfortunately, creating 100% correct helper functions is not straightforward in testthat because fail() captures the calling environment in order to give useful tracebacks, and testthat’s own expectations don’t expose this as an argument. Fortunately, getting this right is not critical (you’ll just get a slightly suboptimal traceback in the case of failure), so we don’t recommend bothering in most cases. We document it here, however, because it’s important to get it right in testthat itself.

    The key challenge is that fail() captures a trace_env, which should be the execution environment of the expectation. This usually works because the default value of trace_env is rlang::caller_env(). But when you introduce a helper, you’ll need to explicitly pass it along:

    expect_length_ <- function(act, n, trace_env = caller_env()) {
      act_n <- length(act$val)
      if (act_n != n) {
        fail(
          sprintf("%s has length %i, not length %i.", act$lab, act_n, n), 
          trace_env = trace_env
        )
      } else {
        pass()
      }
    }
    
    expect_length <- function(object, n) {  
      act <- quasi_label(rlang::enquo(object))
    
      expect_length_(act, n)
      invisible(act$val)
    }

    A few recommendations:

    • The helper shouldn’t be user-facing, so we give it a _ suffix to make that clear.
    • It’s typically easiest for a helper to take the labeled value produced by quasi_label().
    • Your helper should usually be called for its side effects (i.e. it calls fail() and pass()).
    • You should return invisible(act$val) from the parent expectation as usual.

    Again, you’re probably not writing so many expectations that it makes sense for you to go to this effort, but it is important for testthat to get it right.

    testthat/inst/doc/test-fixtures.R0000644000176200001440000001241415130237653016614 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----------------------------------------------------------------------------- library(testthat) ## ----------------------------------------------------------------------------- test_that("print() respects digits option", { x <- 1.23456789 withr::local_options(digits = 1) expect_equal(capture.output(x), "[1] 1") withr::local_options(digits = 5) expect_equal(capture.output(x), "[1] 1.2346") }) ## ----------------------------------------------------------------------------- local_digits <- function(sig_digits, env = parent.frame()) { withr::local_options(digits = sig_digits, .local_envir = env) # mark that this function is called for its side-effects not its return value invisible() } ## ----include = FALSE---------------------------------------------------------- op <- options() ## ----------------------------------------------------------------------------- sloppy <- function(x, sig_digits) { options(digits = sig_digits) print(x) } pi sloppy(pi, 2) pi ## ----include = FALSE---------------------------------------------------------- options(op) ## ----------------------------------------------------------------------------- neat <- function(x, sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) print(x) } pi neat(pi, 2) pi ## ----------------------------------------------------------------------------- test_that("can print one digit of pi", { op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) expect_output(print(pi), "3") }) pi ## ----eval = FALSE------------------------------------------------------------- # op <- options(digits = 1) # on.exit(options(op), add = TRUE, after = FALSE) ## 
----------------------------------------------------------------------------- neat <- function(x, sig_digits) { op <- options(digits = sig_digits) withr::defer(options(op)) print(x) } ## ----eval = FALSE------------------------------------------------------------- # withr::defer(print("hi")) # #> Setting deferred event(s) on global environment. # #> * Execute (and clear) with `deferred_run()`. # #> * Clear (without executing) with `deferred_clear()`. # # withr::deferred_run() # #> [1] "hi" ## ----------------------------------------------------------------------------- local_digits <- function(sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) } neater <- function(x, sig_digits) { local_digits(1) print(x) } neater(pi) ## ----------------------------------------------------------------------------- local_digits <- function(sig_digits, env = parent.frame()) { op <- options(digits = sig_digits) withr::defer(options(op), env) } neater(pi) ## ----------------------------------------------------------------------------- test_that("withr lets us write custom helpers for local state manipulation", { local_digits(1) expect_output(print(exp(1)), "3") local_digits(3) expect_output(print(exp(1)), "2.72") }) print(exp(1)) ## ----------------------------------------------------------------------------- test_that("local_options() only affects a minimal amount of code", { withr::local_options(x = 1) expect_equal(getOption("x"), 1) local({ withr::local_options(x = 2) expect_equal(getOption("x"), 2) }) expect_equal(getOption("x"), 1) }) getOption("x") ## ----------------------------------------------------------------------------- message2 <- function(...) { if (!isTRUE(getOption("verbose"))) { return() } message(...) } ## ----------------------------------------------------------------------------- message3 <- function(..., verbose = getOption("verbose")) { if (!isTRUE(verbose)) { return() } message(...) 
} ## ----------------------------------------------------------------------------- test_that("message2() output depends on verbose option", { withr::local_options(verbose = TRUE) expect_message(message2("Hi!")) withr::local_options(verbose = FALSE) expect_message(message2("Hi!"), NA) }) ## ----eval = FALSE------------------------------------------------------------- # local_create_package <- function(dir = file_temp(), env = parent.frame()) { # old_project <- proj_get_() # # # create new folder and package # create_package(dir, open = FALSE) # A # withr::defer(fs::dir_delete(dir), envir = env) # -A # # # change working directory # withr::local_dir(dir, .local_envir = env) # B + -B # # # switch to new usethis project # proj_set(dir) # C # withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C # # dir # } ## ----eval = FALSE------------------------------------------------------------- # test_that("use_roxygen_md() adds DESCRIPTION fields", { # pkg <- local_create_package() # use_roxygen_md() # # expect_true(uses_roxygen_md()) # expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)") # expect_true(desc::desc_has_fields("RoxygenNote", pkg)) # }) ## ----eval = FALSE------------------------------------------------------------- # # Run before any test # write.csv(mtcars, "mtcars.csv") # # # Run after all tests # withr::defer(unlink("mtcars.csv"), teardown_env()) testthat/inst/doc/special-files.R0000644000176200001440000000121415130237651016500 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(testthat) ## ----eval = FALSE------------------------------------------------------------- # op <- options(reprex.clipboard = FALSE, reprex.html_preview = FALSE) # # withr::defer(options(op), teardown_env()) ## ----eval = 
FALSE------------------------------------------------------------- # withr::local_options( # list(reprex.clipboard = FALSE, reprex.html_preview = FALSE), # .local_envir = teardown_env() # ) testthat/inst/doc/third-edition.Rmd0000644000176200001440000002315115072252215017046 0ustar liggesusers--- title: "testthat 3e" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{testthat 3e} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` testthat 3.0.0 introduces the idea of an "edition" of testthat. An edition is a bundle of behaviours that you have to explicitly choose to use, allowing us to make otherwise backward incompatible changes. This is particularly important for testthat since it has a very large number of packages that use it (almost 5,000 at last count). Choosing to use the 3rd edition allows you to use our latest recommendations for ongoing and new work, while historical packages continue to use the old behaviour. (We don't anticipate creating new editions very often, and they'll always be matched with major version, i.e. if there's another edition, it'll be the fourth edition and will come with testthat 4.0.0.) This vignette shows you how to activate the 3rd edition, introduces the main features, and discusses common challenges when upgrading a package. If you have a problem that this vignette doesn't cover, please let me know, as it's likely that the problem also affects others. ```{r, message = FALSE} library(testthat) local_edition(3) ``` ## Activating The usual way to activate the 3rd edition is to add a line to your `DESCRIPTION`: Config/testthat/edition: 3 This will activate the 3rd edition for every test in your package. 
You can also control the edition used for individual tests with `testthat::local_edition()`: ```{r} test_that("I can use the 3rd edition", { local_edition(3) expect_true(TRUE) }) ``` This is also useful if you've switched to the 3rd edition and have a couple of tests that fail. You can use `local_edition(2)` to revert back to the old behaviour, giving you some breathing room to figure out the underlying issue. ```{r} test_that("I want to use the 2nd edition", { local_edition(2) expect_true(TRUE) }) ``` ## Changes There are three major changes in the 3rd edition: - A number of outdated functions are now **deprecated**, so you'll be warned about them every time you run your tests (but they won't cause `R CMD check` to fail). - testthat no longer silently swallows **messages**; you now need to deliberately handle them. - `expect_equal()` and `expect_identical()` now use the [**waldo**](https://waldo.r-lib.org/) package instead of `identical()` and `all.equal()`. This makes them more consistent and provides an enhanced display of differences when a test fails. ### Deprecations A number of outdated functions have been deprecated. Most of these functions have not been recommended for a number of years, but before the introduction of the edition idea, I didn't have a good way of preventing people from using them without breaking a lot of code on CRAN. - `context()` is formally deprecated. testthat has been moving away from `context()` in favour of file names for quite some time, and now you'll be strongly encouraged remove these calls from your tests. - `expect_is()` is deprecated in favour of the more specific `expect_type()`, `expect_s3_class()`, and `expect_s4_class()`. This ensures that you check the expected class along with the expected OO system. - The very old `expect_that()` syntax is now deprecated. This was an overly clever API that I regretted even before the release of testthat 1.0.0. 
- `expect_equivalent()` has been deprecated since it is now equivalent (HA HA) to `expect_equal(ignore_attr = TRUE)`. - `setup()` and `teardown()` are deprecated in favour of test fixtures. See `vignette("test-fixtures")` for details. - `expect_known_output()`, `expect_known_value()`, `expect_known_hash()`, and `expect_equal_to_reference()` are all deprecated in favour of `expect_snapshot_output()` and `expect_snapshot_value()`. - `with_mock()` and `local_mock()` are deprecated; please use `with_mocked_bindings()` or `local_mocked_bindings()` instead. Fixing these deprecation warnings should be straightforward. ### Warnings In the second edition, `expect_warning()` swallows all warnings regardless of whether or not they match the `regexp` or `class`: ```{r} f <- function() { warning("First warning") warning("Second warning") warning("Third warning") } local_edition(2) expect_warning(f(), "First") ``` In the third edition, `expect_warning()` captures at most one warning so the others will bubble up: ```{r} local_edition(3) expect_warning(f(), "First") ``` You can either add additional expectations to catch these warnings, or silence them all with `suppressWarnings()`: ```{r} f() |> expect_warning("First") |> expect_warning("Second") |> expect_warning("Third") f() |> expect_warning("First") |> suppressWarnings() ``` Alternatively, you might want to capture them all in a snapshot test: ```{r} test_that("f() produces expected outputs/messages/warnings", { expect_snapshot(f()) }) ``` The same principle also applies to `expect_message()`, but message handling has changed in a more radical way, as described next. ### Messages For reasons that I can no longer remember, testthat silently ignores all messages. This is inconsistent with other types of output, so as of the 3rd edition, they now bubble up to your test results. You'll have to explicit ignore them with `suppressMessages()`, or if they're important, test for their presence with `expect_message()`. 
### waldo Probably the biggest day-to-day difference (and the biggest reason to upgrade!) is the use of [`waldo::compare()`](https://waldo.r-lib.org/reference/compare.html) inside of `expect_equal()` and `expect_identical()`. The goal of waldo is to find and concisely describe the difference between a pair of R objects, and it's designed specifically to help you figure out what's gone wrong in your unit tests. ```{r, error = TRUE} f1 <- factor(letters[1:3]) f2 <- ordered(letters[1:3], levels = letters[1:4]) local_edition(2) expect_equal(f1, f2) local_edition(3) expect_equal(f1, f2) ``` waldo looks even better in your console because it carefully uses colours to help highlight the differences. The use of waldo also makes precise the difference between `expect_equal()` and `expect_identical()`: `expect_equal()` sets `tolerance` so that waldo will ignore small numerical differences arising from floating point computation. Otherwise the functions are identical (HA HA). This change is likely to result in the most work during an upgrade, because waldo can give slightly different results to both `identical()` and `all.equal()` in moderately common situations. I believe on the whole the differences are meaningful and useful, so you'll need to handle them by tweaking your tests. The following changes are most likely to affect you: - `expect_equal()` previously ignored the environments of formulas and functions. This is most like to arise if you are testing models. It's worth thinking about what the correct values should be, but if that is to annoying you can opt out of the comparison with `ignore_function_env` or `ignore_formula_env`. - `expect_equal()` used a combination of `all.equal()` and a home-grown `testthat::compare()` which unfortunately used a slightly different definition of tolerance. Now `expect_equal()` always uses the same definition of tolerance everywhere, which may require tweaks to your existing tolerance values. 
- `expect_equal()` previously ignored timezone differences when one object had the current timezone set implicitly (with `""`) and the other had it set explicitly: ```{r, error = TRUE} dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0) attr(dt1, "tzone") <- "" attr(dt2, "tzone") <- Sys.timezone() local_edition(2) expect_equal(dt1, dt2) local_edition(3) expect_equal(dt1, dt2) ``` ### Reproducible output In the third edition, `test_that()` automatically calls `local_reproducible_output()` which automatically sets a number of options and environment variables to ensure output is as reproducible across systems. This includes setting: - `options(crayon.enabled = FALSE)` and `options(cli.unicode = FALSE)` so that the crayon and cli packages produce raw ASCII output. - `Sys.setLocale("LC_COLLATE" = "C")` so that sorting a character vector returns the same order regardless of the system language. - `options(width = 80)` so print methods always generate the same output regardless of your actual console width. See the documentation for more details. ## Upgrading The changes lend themselves to the following workflow for upgrading from the 2nd to the 3rd edition: 1. Activate edition 3. You can let [`usethis::use_testthat(3)`](https://usethis.r-lib.org/reference/use_testthat.html) do this for you. 2. Remove or replace deprecated functions, going over the list of above. 3. If your output got noisy, quiet things down by either capturing or suppressing warnings and messages. 4. Inspect test outputs if objects are not "all equal" anymore. ## Alternatives You might wonder why we came up with the idea of an "edition", rather than creating a new package like testthat3. We decided against making a new package because the 2nd and 3rd edition share a very large amount of code, so making a new package would have substantially increased the maintenance burden: the majority of bugs would've needed to be fixed in two places. 
If you're a programmer in other languages, you might wonder why we can't rely on [semantic versioning](https://semver.org). The main reason is that CRAN checks all packages that use testthat with the latest version of testthat, so simply incrementing the major version number doesn't actually help with reducing R CMD check failures on CRAN. testthat/inst/doc/snapshotting.Rmd0000644000176200001440000003647315067547665017063 0ustar liggesusers--- title: "Snapshot tests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Snapshot tests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) set.seed(1014) ``` The goal of a unit test is to record the expected output of a function using code. This is a powerful technique because it not only ensures that code doesn't change unexpectedly, but it also expresses the desired behavior in a way that a human can understand. However, it's not always convenient to record the expected behavior with code. Some challenges include: - Text output that includes many characters like quotes and newlines that require special handling in a string. - Output that is large, making it painful to define the reference output and bloating the size of the test file. - Binary formats like plots or images, which are very difficult to describe in code: e.g., the plot looks right, the error message is actionable, or the print method uses color effectively. For these situations, testthat provides an alternative mechanism: snapshot tests. Instead of using code to describe expected output, snapshot tests (also known as [golden tests](https://ro-che.info/articles/2017-12-04-golden-tests)) record results in a separate human-readable file. Snapshot tests in testthat are inspired primarily by [Jest](https://jestjs.io/docs/en/snapshot-testing), thanks to a number of very useful discussions with Joe Cheng. 
```{r setup} library(testthat) ``` ```{r include = FALSE} snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") ``` ## Basic workflow We'll illustrate the basic workflow with a simple function that generates HTML bullets. It can optionally include an `id` attribute, which allows you to construct a link directly to that list. ```{r} bullets <- function(text, id = NULL) { paste0( "\n", paste0("
  • ", text, "
  • \n", collapse = ""), "\n" ) } cat(bullets("a", id = "x")) ``` Testing this simple function is relatively painful. To write the test you have to carefully escape the newlines and quotes. And then when you re-read the test in the future, all that escaping makes it hard to tell exactly what it's supposed to return. ```{r} test_that("bullets", { expect_equal(bullets("a"), "
      \n
    • a
    • \n
    \n") expect_equal(bullets("a", id = "x"), "
      \n
    • a
    • \n
    \n") }) ``` This is a great place to use snapshot testing. To do this we make two changes to our code: - We use `expect_snapshot()` instead of `expect_equal()` - We wrap the call in `cat()` (to avoid `[1]` in the output, like in the first interactive example above). This yields the following test: ```{r} test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` ```{r, include = FALSE} # Reset snapshot test snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ``` When we run the test for the first time, it automatically generates reference output and prints it, so that you can visually confirm that it's correct. The output is automatically saved in `_snaps/{name}.md`. The name of the snapshot matches your test file name --- e.g. if your test is `test-pizza.R` then your snapshot will be saved in `tests/testthat/_snaps/pizza.md`. As the file name suggests, this is a markdown file, which I'll explain shortly. If you run the test again, it'll succeed: ```{r} test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` ```{r} #| include: false # finalise snapshot to in order to get an error snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ``` But if you change the underlying code, say to tweak the indenting, the test will fail: ```{r, error = TRUE} bullets <- function(text, id = NULL) { paste0( "\n", paste0("
  • ", text, "
  • \n", collapse = ""), "\n" ) } test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ``` If this is a deliberate change, you can follow the advice in the message and update the snapshots for that file by running `snapshot_accept("pizza")`; otherwise, you can fix the bug and your tests will pass once more. (You can also accept snapshots for all files with `snapshot_accept()`.) If you delete the test, the corresponding snapshot will be removed the next time you run the tests. If you delete all snapshots in the file, the entire snapshot file will be deleted the next time you run all the tests. ### Snapshot format Snapshots are recorded using a subset of markdown. You might wonder why we use markdown. We use it because it's important that snapshots be human-readable because humans have to read them during code reviews. Reviewers often don't run your code but still want to understand the changes. Here's the snapshot file generated by the test above: ``` md # bullets
    <ul>
      <li>a</li>
    </ul>

    ---

    <ul id="b">
      <li>a</li>
    </ul>
    ``` Each test starts with `# {test name}`, a level 1 heading. Within a test, each snapshot expectation is indented by four spaces, i.e., as code, and they are separated by `---`, a horizontal rule. ### Interactive usage Because the snapshot output uses the name of the current test file and the current test, snapshot expectations don't really work when run interactively at the console. Since they can't automatically find the reference output, they instead just print the current value for manual inspection. ## Testing errors So far we've focused on snapshot tests for output printed to the console. But `expect_snapshot()` also captures messages, errors, and warnings[^1]. Messages and warnings are straightforward, but capturing errors is *slightly* more difficult because `expect_snapshot()` will fail if there's an error: ```{r} #| error: true test_that("you can't add a number and a letter", { expect_snapshot(1 + "a") }) ``` This is a safety valve that ensures that you don't accidentally write broken code. To deliberately snapshot an error, you'll have to specifically request it with `error = TRUE`: ```{r} test_that("you can't add a number and a letter", { expect_snapshot(1 + "a", error = TRUE) }) ``` When the code gets longer, I like to put `error = TRUE` up front so it's a little more obvious: ```{r} test_that("you can't add weird things", { expect_snapshot(error = TRUE, { 1 + "a" mtcars + iris Sys.Date() + factor() }) }) ``` Just be careful: when you set `error = TRUE`, `expect_snapshot()` checks that at least one expression throws an error, not that every expression throws an error. For example, look above and notice that adding a date and a factor generated a warning, not an error. Snapshot tests are particularly important when testing complex error messages, such as those that you might generate with cli. Here's a more realistic example illustrating how you might test `check_unnamed()`, a function that ensures all arguments in `...` are unnamed. 
```{r} check_unnamed <- function(..., call = parent.frame()) { names <- ...names() has_name <- names != "" if (!any(has_name)) { return(invisible()) } named <- names[has_name] cli::cli_abort( c( "All elements of {.arg ...} must be unnamed.", i = "You supplied argument{?s} {.arg {named}}." ), call = call ) } test_that("no errors if all arguments unnamed", { expect_no_error(check_unnamed()) expect_no_error(check_unnamed(1, 2, 3)) }) test_that("actionable feedback if some or all arguments named", { expect_snapshot(error = TRUE, { check_unnamed(x = 1, 2) check_unnamed(x = 1, y = 2) }) }) ``` ## Other challenges ### Varying outputs Sometimes part of the output varies in ways that you can't easily control. In many cases, it's convenient to use mocking (`vignette("mocking")`) to ensure that every run of the function always produces the same output. In other cases, it's easier to manipulate the text output with a regular expression or similar. That's the job of the `transform` argument, which should be passed a function that takes a character vector of lines and returns a modified vector. This type of problem often crops up when you are testing a function that gives feedback about a path. In your tests, you'll typically use a temporary path (e.g., from `withr::local_tempfile()`), so if you display the path in a snapshot, it will be different every time. 
For example, consider this "safe" version of `writeLines()` that requires you to explicitly opt in to overwriting an existing file: ```{r} safe_write_lines <- function(lines, path, overwrite = FALSE) { if (file.exists(path) && !overwrite) { cli::cli_abort(c( "{.path {path}} already exists.", i = "Set {.code overwrite = TRUE} to overwrite" )) } writeLines(lines, path) } ``` If you use a snapshot test to confirm that the error message is useful, the snapshot will be different every time the test is run: ```{r} #| include: false snapper$end_file() snapper$start_file("snapshotting.Rmd", "safe-write-lines") ``` ```{r} test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot(safe_write_lines(letters, path), error = TRUE) }) ``` ```{r} #| include: false snapper$end_file() snapper$start_file("snapshotting.Rmd", "safe-write-lines") ``` ```{r} #| error: true test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot(safe_write_lines(letters, path), error = TRUE) }) ``` ```{r} #| include: false snapper$end_file() snapper$start_file("snapshotting.Rmd", "test-2") ``` One way to fix this problem is to use the `transform` argument to replace the temporary path with a fixed value: ```{r} test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot( safe_write_lines(letters, path), error = TRUE, transform = \(lines) gsub(path, "", lines, fixed = TRUE) ) }) ``` Now even though the path varies, the snapshot does not. ### `local_reproducible_output()` By default, testthat sets a number of options that simplify and standardize output: * The console width is set to 80. * {cli} ANSI coloring and hyperlinks are suppressed. * Unicode characters are suppressed. These are sound defaults that we have found useful to minimize spurious differences between tests run in different environments. 
However, there are times when you want to deliberately test different widths, ANSI escapes, or Unicode characters, so you can override the defaults with `local_reproducible_output()`. ### Snapshotting graphics If you need to test graphical output, use {vdiffr}. vdiffr is used to test ggplot2 and incorporates everything we know about high-quality graphics tests that minimize false positives. Graphics testing is still often fragile, but using vdiffr means you will avoid all the problems we know how to avoid. ### Snapshotting values `expect_snapshot()` is the most used snapshot function because it records everything: the code you run, printed output, messages, warnings, and errors. If you care about the return value rather than any side effects, you might want to use `expect_snapshot_value()` instead. It offers a number of serialization approaches that provide a tradeoff between accuracy and human readability. ```{r} test_that("can snapshot a simple list", { x <- list(a = list(1, 5, 10), b = list("elephant", "banana")) expect_snapshot_value(x) }) ``` ## Whole file snapshotting `expect_snapshot()`, `expect_snapshot_output()`, `expect_snapshot_error()`, and `expect_snapshot_value()` use one snapshot file per test file. But that doesn't work for all file types—for example, what happens if you want to snapshot an image? `expect_snapshot_file()` provides an alternative workflow that generates one snapshot per expectation, rather than one file per test. Assuming you're in `test-burger.R`, then the snapshot created by `expect_snapshot_file(code_that_returns_path_to_file(), "toppings.png")` would be saved in `tests/testthat/_snaps/burger/toppings.png`. If a future change in the code creates a different file, it will be saved in `tests/testthat/_snaps/burger/toppings.new.png`. Unlike `expect_snapshot()` and friends, `expect_snapshot_file()` can't provide an automatic diff when the test fails. Instead, you'll need to call `snapshot_review()`. 
This launches a Shiny app that allows you to visually review each change and approve it if it's deliberate: ```{r} #| echo: false #| fig-alt: Screenshot of the Shiny app for reviewing snapshot #| changes to images. It shows the changes to a png file of #| a plot created in a snapshot test. There is a button #| to accept the changed snapshot, or to skip it. knitr::include_graphics("review-image.png") ``` ```{r} #| echo: false #| fig-alt: Screenshot of the Shiny app for reviewing snapshot #| changes to text files. It shows the changes to a .R file #| created in a snapshot test, where a line has been removed. #| There is a button to accept the changed snapshot, or to skip it. knitr::include_graphics("review-text.png") ``` The display varies based on the file type (currently text files, common image files, and csv files are supported). Sometimes the failure occurs in a non-interactive environment where you can't run `snapshot_review()`, e.g., in `R CMD check`. In this case, the easiest fix is to retrieve the `.new` file, copy it into the appropriate directory, and then run `snapshot_review()` locally. If this happens on GitHub, testthat provides some tools to help you in the form of `gh_download_artifact()`. In most cases, we don't expect you to use `expect_snapshot_file()` directly. Instead, you'll use it via a wrapper that does its best to gracefully skip tests when differences in platform or package versions make it unlikely to generate perfectly reproducible output. That wrapper should also typically call `announce_snapshot_file()` to avoid snapshots being incorrectly cleaned up—see the documentation for more details. ## Previous work This is not the first time that testthat has attempted to provide snapshot testing (although it's the first time I knew what other languages called them). This section describes some of the previous attempts and why we believe the new approach is better. 
- `verify_output()` has three main drawbacks: - You have to supply a path where the output will be saved. This seems like a small issue, but thinking of a good name, and managing the difference between interactive and test-time paths introduces a surprising amount of friction. - It always overwrites the previous result, automatically assuming that the changes are correct. That means you have to use it with git, and it's easy to accidentally accept unwanted changes. - It's relatively coarse grained, which means tests that use it tend to keep growing and growing. - `expect_known_output()` is a finer-grained version of `verify_output()` that captures output from a single function. The requirement to produce a path for each individual expectation makes it even more painful to use. - `expect_known_value()` and `expect_known_hash()` have all the disadvantages of `expect_known_output()`, but also produce binary output, meaning that you can't easily review test differences in pull requests. testthat/inst/doc/challenging-tests.html0000644000176200001440000007104715130237642020151 0ustar liggesusers Testing challenging functions

    Testing challenging functions

    This vignette is a quick reference guide for testing challenging functions. It’s organized by problem type rather than technique, so you can quickly skim the whole vignette, spot the problem you’re facing, and then learn more about useful tools for solving it. In it, you’ll learn how to overcome the following challenges:

    • Functions with implicit inputs, like options and environment variables.
    • Random number generators.
    • Tests that can’t be run in some environments.
    • Testing web APIs.
    • Testing graphical output.
    • User interaction.
    • User-facing text.
    • Repeated code.

    Options and environment variables

    If your function depends on options or environment variables, first try refactoring the function to make the inputs explicit. If that’s not possible, use functions like withr::local_options() or withr::local_envvar() to temporarily change options and environment values within a test. Learn more in vignette("test-fixtures").

    Random numbers

    What happens if you want to test a function that relies on randomness in some way? If you’re writing a random number generator, you probably want to generate a large quantity of random numbers and then apply some statistical test. But what if your function just happens to use a little bit of pre-existing randomness? How do you make your tests repeatable and reproducible? Under the hood, random number generators generate different numbers because they update a special .Random.seed variable stored in the global environment. You can temporarily set this seed to a known value to make your random numbers reproducible with withr::local_seed(), making random numbers a special case of test fixtures (vignette("test-fixtures")).

    Here’s a simple example showing how you might test the basic operation of a function that rolls a die:

    dice <- function() {
      sample(6, 1)
    }
    
    test_that("dice returns different numbers", {
      withr::local_seed(1234)
    
      expect_equal(dice(), 4)
      expect_equal(dice(), 2)
      expect_equal(dice(), 6)
    })
    #> Test passed with 3 successes 🥇.

    Alternatively, you might want to mock (vignette("mocking")) the function to eliminate randomness.

    roll_three <- function() {
      sum(dice(), dice(), dice())
    }
    
    test_that("three dice adds values of individual calls", {
      local_mocked_bindings(dice = mock_output_sequence(1, 2, 3))
      expect_equal(roll_three(), 6)
    })
    #> Test passed with 1 success 🌈.

    When should you set the seed and when should you use mocking? As a general rule of thumb, set the seed when you want to test the actual random behavior, and use mocking when you want to test the logic that uses the random results.

    Some tests can’t be run in some circumstances

    You can skip a test without it passing or failing if you can’t or don’t want to run it (e.g., it’s OS dependent, it only works interactively, or it shouldn’t be tested on CRAN). Learn more in vignette("skipping").

    HTTP requests

    If you’re trying to test functions that rely on HTTP requests, we recommend using {vcr} or {httptest2}. These packages both allow you to interactively record HTTP responses and then later replay them in tests. This is a specialized type of mocking (vignette("mocking")) that works with {httr} and {httr2} to isolate your tests from failures in the underlying API.

    If your package is going to CRAN, you must either use one of these packages or use skip_on_cran() for all internet-facing tests. Otherwise, you are at high risk of failing R CMD check if the underlying API is temporarily down. This sort of failure causes extra work for the CRAN maintainers and extra hassle for you.

    Graphics

    The only type of testing you can use for graphics is snapshot testing (vignette("snapshotting")) via expect_snapshot_file(). Graphical snapshot testing is surprisingly challenging because you need pixel-perfect rendering across multiple versions of multiple operating systems, and this is hard, mostly due to imperceptible differences in font rendering. Fortunately we’ve needed to overcome these challenges in order to test {ggplot2}, and you can benefit from our experience by using {vdiffr} when testing graphical output.

    User interaction

    If you’re testing a function that relies on user feedback (e.g. from readline(), utils::menu(), or utils::askYesNo()), you can use mocking (vignette("mocking")) to return fixed values within the test. For example, imagine that you’ve written the following function that asks the user if they want to continue:

    continue <- function(prompt) {
      cat(prompt, "\n", sep = "")
    
      repeat {
        val <- readline("Do you want to continue? (y/n) ")
        if (val %in% c("y", "n")) {
          return(val == "y")
        }
        cat("! You must enter y or n\n")
      }  
    }
    
    readline <- NULL

    You could test its behavior by mocking readline() and using a snapshot test:

    test_that("user must respond y or n", {
      mock_readline <- local({
        i <- 0
        function(prompt) {
          i <<- i + 1
          cat(prompt)
          val <- if (i == 1) "x" else "y"
          cat(val, "\n", sep = "")
          val
        }
      })
    
      local_mocked_bindings(readline = mock_readline)
      expect_snapshot(val <- continue("This is dangerous"))
      expect_true(val)
    })
    #> ── Warning: user must respond y or n ───────────────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   val <- continue("This is dangerous")
    #> Output
    #>   This is dangerous
    #>   Do you want to continue? (y/n) x
    #>   ! You must enter y or n
    #>   Do you want to continue? (y/n) y
    #> Test passed with 2 successes 🎊.

    If you don’t care about reproducing the output of continue() and just want to recreate its return value, you can use mock_output_sequence(). This creates a function that returns the input supplied to mock_output_sequence() in sequence: the first input at the first call, the second input at the second call, etc. The following code shows how it works and how you might use it to test readline():

    f <- mock_output_sequence(1, 12, 123)
    f()
    #> [1] 1
    f()
    #> [1] 12
    f()
    #> [1] 123

    And

    test_that("user must respond y or n", {
      local_mocked_bindings(readline = mock_output_sequence("x", "y"))
      expect_true(continue("This is dangerous"))
    })
    #> This is dangerous
    #> ! You must enter y or n
    #> Test passed with 1 success 😀.

    If you were testing the behavior of some function that uses continue(), you might want to mock continue() instead of readline(). For example, the function below requires user confirmation before overwriting an existing file. In order to focus our tests on the behavior of just this function, we mock continue() to return either TRUE or FALSE without any user messaging.

    save_file <- function(path, data) {
      if (file.exists(path)) {
        if (!continue("`path` already exists")) {
          stop("Failed to continue")
        }
      }
      writeLines(data, path)
    }
    
    test_that("save_file() requires confirmation to overwrite file", {
      path <- withr::local_tempfile(lines = letters)
    
      local_mocked_bindings(continue = function(...) TRUE)
      save_file(path, "a")
      expect_equal(readLines(path), "a")
    
      local_mocked_bindings(continue = function(...) FALSE)
      expect_snapshot(save_file(path, "a"), error = TRUE)
    })
    #> ── Warning: save_file() requires confirmation to overwrite file ────────────────
    #> Adding new snapshot:
    #> Code
    #>   save_file(path, "a")
    #> Condition
    #>   Error in `save_file()`:
    #>   ! Failed to continue
    #> Test passed with 2 successes 🥇.

    User-facing text

    Errors, warnings, and other user-facing text should be tested to ensure they’re both actionable and consistent across the package. Obviously, it’s not possible to test this automatically, but you can use snapshots (vignette("snapshotting")) to ensure that user-facing messages are clearly shown in PRs and easily reviewed by another human.

    Repeated code

    If you find yourself repeating the same set of expectations again and again across your test suite, it may be a sign that you should design your own expectation. Learn how in vignette("custom-expectation").

    testthat/inst/doc/skipping.html0000644000176200001440000004426715130237646016372 0ustar liggesusers Skipping tests

    Skipping tests

    Sometimes you have tests that you can’t or don’t want to run in certain circumstances. This vignette describes how to skip tests to avoid execution in undesired environments. The most common scenarios are:

    • You’re testing a web service that occasionally fails, and you don’t want to run the tests on CRAN. Or the API requires authentication, and you can only run the tests when you’ve securely distributed secrets.

    • You’re relying on features that not all operating systems possess, and want to make sure your code doesn’t run on platforms where it doesn’t work. The most common platform with limitations is Windows, which among other things lacks full UTF-8 support.

    • You’re writing tests for multiple versions of R or multiple versions of a dependency, and you want to skip when a feature isn’t available. You generally don’t need to skip tests if a suggested package is not installed. This is only needed in exceptional circumstances, e.g., when a package is not available on some operating systems.

    library(testthat)

    Basics

    testthat comes with a variety of helpers for the most common situations:

    • skip_if_not_installed() skips if a required package is not installed. You can optionally supply a minimal version too.

    • skip_on_cran() skips tests on CRAN. skip_on_bioc() skips tests on Bioconductor. This is useful for slow tests and tests that occasionally fail for reasons outside of your control.

    • skip_on_os() allows you to skip tests on a specific operating system. Generally, you should strive to avoid this as much as possible (so your code works the same on all platforms), but sometimes it’s just not possible.

    • skip_on_ci() skips tests on most CI platforms (e.g., GitHub Actions).

    • skip_on_covr() skips tests during code coverage.

    • skip_unless_r(">= 4.2") only runs tests for newer R versions. skip_unless_r("< 4.2") only runs tests for older R versions.

    You can implement your own skips using skip_if() or skip_if_not():

    # Only run test if a token file is available
    skip_if_not(file.exists("secure-token.json"))
    
    # Only run test if R has memory profiling capabilities
    skip_if_not(capabilities("profmem"))
    
    # Only run if we've opted-in to slow tests with an env var
    skip_if(Sys.getenv("RUN_SLOW_TESTS") == "true")

    All reporters show which tests are skipped. As of testthat 3.0.0, ProgressReporter (used interactively) and CheckReporter (used inside R CMD check) also display a summary of skips across all tests. It looks something like this:

    ── Skipped tests  ───────────────────────────────────────────────────────
    ● No token (3)
    ● On CRAN (1)

    This display is really important, and you should keep an eye on it when working on your test suite. If you accidentally skip too many tests, you can trick yourself into believing your code is working correctly, when actually you’re just not testing it.

    Helpers

    If you find yourself using the same skip_if() or skip_if_not() expression across multiple tests, it’s a good idea to create a helper function. This function should start with skip_ and live in a tests/testthat/helper-{something}.R file:

    skip_if_dangerous <- function() {
      if (!identical(Sys.getenv("DANGER"), "")) {
        skip("Not run in dangerous environments.")
      } else {
        invisible()
      }
    }

    Embedding skip() in package functions

    Another useful technique is to embed a skip() directly into a package function. For example, take a look at pkgdown:::convert_markdown_to_html(), which absolutely cannot work if the Pandoc tool is unavailable:

    convert_markdown_to_html <- function(in_path, out_path, ...) {
      if (rmarkdown::pandoc_available("2.0")) {
        from <- "markdown+gfm_auto_identifiers-citations+emoji+autolink_bare_uris"
      } else if (rmarkdown::pandoc_available("1.12.3")) {
        from <- "markdown_github-hard_line_breaks+tex_math_dollars+tex_math_single_backslash+header_attributes"
      } else {
        if (is_testing()) {
          testthat::skip("Pandoc not available")
        } else {
          abort("Pandoc not available")
        }
      }
      
      ...
    }

    If Pandoc is not available when convert_markdown_to_html() executes, it throws an error unless it appears to be part of a test run, in which case the test is skipped. This is an alternative to implementing a custom skipper, e.g., skip_if_no_pandoc(), and inserting it into many of pkgdown’s tests.

    We don’t want pkgdown to have a runtime dependency on testthat, so pkgdown includes a copy of testthat::is_testing():

    is_testing <- function() {
      identical(Sys.getenv("TESTTHAT"), "true")
    }

    It might look like the code still has a runtime dependency on testthat, because of the call to testthat::skip(). But testthat::skip() is only executed during a test run, which means that testthat is installed.

    We have mixed feelings about this approach. On the one hand, it feels elegant and concise, and it absolutely guarantees that you’ll never miss a needed skip in one of your tests. On the other hand, it mixes code and tests in an unusual way, and when you’re focused on the tests, it’s easy to miss the fact that a package function contains a skip().

    testthat/inst/doc/skipping.Rmd0000644000176200001440000001324715067547665016160 0ustar liggesusers--- title: "Skipping tests" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Skipping tests} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` Sometimes you have tests that you can't or don't want to run in certain circumstances. This vignette describes how to **skip** tests to avoid execution in undesired environments. The most common scenarios are: - You're testing a web service that occasionally fails, and you don't want to run the tests on CRAN. Or the API requires authentication, and you can only run the tests when you've [securely distributed](https://gargle.r-lib.org/articles/articles/managing-tokens-securely.html) secrets. - You're relying on features that not all operating systems possess, and want to make sure your code doesn't run on platforms where it doesn't work. The most common platform with limitations is Windows, which among other things lacks full UTF-8 support. - You're writing tests for multiple versions of R or multiple versions of a dependency, and you want to skip when a feature isn't available. You generally don't need to skip tests if a suggested package is not installed. This is only needed in exceptional circumstances, e.g., when a package is not available on some operating systems. ```{r setup} library(testthat) ``` ## Basics testthat comes with a variety of helpers for the most common situations: - `skip_if_not_installed()` skips if a required package is not installed. You can optionally supply a minimal version too. - `skip_on_cran()` skips tests on CRAN. `skip_on_bioc()` skips tests on Bioconductor. This is useful for slow tests and tests that occasionally fail for reasons outside of your control. - `skip_on_os()` allows you to skip tests on a specific operating system. 
Generally, you should strive to avoid this as much as possible (so your code works the same on all platforms), but sometimes it's just not possible. - `skip_on_ci()` skips tests on most CI platforms (e.g., GitHub Actions). - `skip_on_covr()` skips tests during code coverage. - `skip_unless_r(">= 4.2")` only runs tests for newer R versions. `skip_unless_r("< 4.2")` only runs tests for older R versions. You can implement your own using skips `skip_if()` or `skip_if_not()`: ```{r} #| eval: false # Only run test if a token file is available skip_if_not(file.exists("secure-token.json")) # Only run test if R has memory profiling capabilities skip_if_not(capabilities("profmem")) # Only run if we've opted-in to slow tests with an env var skip_if(Sys.getenv("RUN_SLOW_TESTS") == "true") ``` All reporters show which tests are skipped. As of testthat 3.0.0, ProgressReporter (used interactively) and CheckReporter (used inside `R CMD check`) also display a summary of skips across all tests. It looks something like this: ``` ── Skipped tests ─────────────────────────────────────────────────────── ● No token (3) ● On CRAN (1) ``` This display is really important, and you should keep an eye on it when working on your test suite. If you accidentally skip too many tests, you can trick yourself into believing your code is working correctly, when actually you're just not testing it. ## Helpers If you find yourself using the same `skip_if()` or `skip_if_not()` expression across multiple tests, it's a good idea to create a helper function. This function should start with `skip_` and live in a `tests/testthat/helper-{something}.R` file: ```{r} skip_if_dangerous <- function() { if (!identical(Sys.getenv("DANGER"), "")) { skip("Not run in dangerous environments.") } else { invisible() } } ``` ## Embedding `skip()` in package functions Another useful technique is to embed a `skip()` directly into a package function. 
For example, take a look at [`pkgdown:::convert_markdown_to_html()`](https://github.com/r-lib/pkgdown/blob/v2.0.7/R/markdown.R#L95-L106), which absolutely cannot work if the Pandoc tool is unavailable: ```{r eval = FALSE} convert_markdown_to_html <- function(in_path, out_path, ...) { if (rmarkdown::pandoc_available("2.0")) { from <- "markdown+gfm_auto_identifiers-citations+emoji+autolink_bare_uris" } else if (rmarkdown::pandoc_available("1.12.3")) { from <- "markdown_github-hard_line_breaks+tex_math_dollars+tex_math_single_backslash+header_attributes" } else { if (is_testing()) { testthat::skip("Pandoc not available") } else { abort("Pandoc not available") } } ... } ``` If Pandoc is not available when `convert_markdown_to_html()` executes, it throws an error *unless* it appears to be part of a test run, in which case the test is skipped. This is an alternative to implementing a custom skipper, e.g., `skip_if_no_pandoc()`, and inserting it into many of pkgdown's tests. We don't want pkgdown to have a runtime dependency on testthat, so pkgdown includes a copy of `testthat::is_testing()`: ```{r} is_testing <- function() { identical(Sys.getenv("TESTTHAT"), "true") } ``` It might look like the code still has a runtime dependency on testthat, because of the call to `testthat::skip()`. But `testthat::skip()` is only executed during a test run, which means that testthat is installed. We have mixed feelings about this approach. On the one hand, it feels elegant and concise, and it absolutely guarantees that you'll never miss a needed skip in one of your tests. On the other hand, it mixes code and tests in an unusual way, and when you're focused on the tests, it's easy to miss the fact that a package function contains a `skip()`. testthat/inst/doc/special-files.html0000644000176200001440000003464215130237651017256 0ustar liggesusers Special files

    Special files

    This vignette describes the various special files that testthat understands: test, helper, setup/teardown, snapshot, and everything else.

    Test files

    These are the bread and butter of testthat. Test files live in tests/testthat/, start with either test- or test_, and end with .r or .R. We recommend organising your test files so that there’s a one-to-one correspondence between the files in R/ and the files in tests/testthat/ so that (e.g.) R/myfile.R has a matching tests/testthat/test-myfile.R. This correspondence is maintained by functions like usethis::use_r() and usethis::use_test() and is taken advantage of by functions like devtools::test_active_file() and devtools::test_coverage_active_file().

    Test files are executed in alphabetical order, but you should strive to avoid dependencies between test files. In principle, you should be able to run your test files in any order or even at the same time.

    Helper files

    Helper files live in tests/testthat/, start with helper, and end with .r or .R. They are sourced by devtools::load_all() (so they’re available interactively when developing your packages) and by test_check() and friends (so that they’re available no matter how your tests are executed).

    Helper files are a useful place for functions that you’ve extracted from repeated code in your tests, whether that be test fixtures (vignette("test-fixtures")), custom expectations (vignette("custom-expectation")), or skip helpers (vignette("skipping")).

    Setup files

    Setup files live in tests/testthat/, start with setup, and end with .r or .R. Typically there is only one setup file which, by convention, is tests/testthat/setup.R. Setup files are sourced by test_check() and friends (so that they’re available no matter how your tests are executed), but they are not sourced by devtools::load_all().

    Setup files are a good place to put truly global test setup that would be impractical to build into every single test and that might be tailored for test execution in non-interactive or remote environments. Examples:

    • Turning off behaviour aimed at an interactive user, such as messaging or writing to the clipboard.
    • Setting up a cache folder.

    If any of your setup should be reversed after test execution (i.e. it needs to be torn down), we recommend maintaining that teardown code alongside the setup code, in setup.R, because this makes it easier to ensure they stay in sync. The artificial environment teardown_env() exists as a magical handle to use in withr::defer() and withr::local_*(). A legacy approach (which still works, but is no longer recommended) is to put teardown code in tests/testthat/teardown.R.

    Here’s a setup.R example from the reprex package, where we turn off clipboard and HTML preview functionality during testing:

    op <- options(reprex.clipboard = FALSE, reprex.html_preview = FALSE)
    
    withr::defer(options(op), teardown_env())

    Since we are just modifying options here, we can be even more concise and use the pre-built function withr::local_options() and pass teardown_env() as the .local_envir:

    withr::local_options(
      list(reprex.clipboard = FALSE, reprex.html_preview = FALSE),
      .local_envir = teardown_env()
    )

    Teardown files

    Teardown files live in tests/testthat/, start with teardown and end with .r or .R. They are executed after the tests are run, but we no longer recommend using them as it’s easier to check that you clean up every mess that you make if you interleave setup and teardown code as described above.

    Snapshot files

    Snapshot files live in tests/testthat/_snaps/. Snapshot files are named automatically based on the name of the test file so that tests/testthat/test-one.R will generate tests/testthat/_snaps/one.md. Learn more about snapshot tests in vignette("snapshotting").

    Other files and folders

    Other files and folders in tests/testthat/ are ignored by testthat, making them a good place to store persistent test data. Since the precise location of the tests/testthat/ directory varies slightly depending on how you’re running the test, we recommend creating paths to these files and directories using test_path().

    testthat/inst/doc/test-fixtures.html0000644000176200001440000013141015130237653017355 0ustar liggesusers Test fixtures

    Test fixtures

    Test hygiene

    Take nothing but memories, leave nothing but footprints.

    ― Chief Si’ahl

    Ideally, a test should leave the world exactly as it found it. But you often need to make changes to exercise every part of your code:

    • Create a file or directory
    • Create a resource on an external system
    • Set an R option
    • Set an environment variable
    • Change working directory
    • Change an aspect of the tested package’s state

    How can you clean up these changes to get back to a clean slate? Scrupulous attention to cleanup is more than just courtesy or being fastidious. It’s also self-serving. The state of the world after test i is the starting state for test i + 1. Tests that change state willy-nilly eventually end up interfering with each other in ways that can be very difficult to debug.

    Most tests are written with an implicit assumption about the starting state, usually whatever tabula rasa means for the target domain of your package. If you accumulate enough sloppy tests, you will eventually find yourself asking the programming equivalent of questions like “Who forgot to turn off the oven?” and “Who didn’t clean up after the dog?” (If you’ve got yourself into this state, testthat provides another tool to help you figure out exactly which test is to blame: set_state_inspector().)

    It’s also important that your setup and cleanup are easy to use when working interactively. When a test fails, you want to be able to quickly recreate the exact environment in which the test is run so you can interactively experiment to figure out what went wrong.

    This article introduces a powerful technique that allows you to solve both problems: test fixtures. We’ll begin by discussing some canned tools, then learn about the underlying theory, discuss exactly what a test fixture is, and finish with a few examples.

    library(testthat)

    local_ helpers

    We’ll begin by giving you the minimal knowledge needed to change global state just within your test. The withr package provides a number of functions that temporarily change the state of the world, carefully undoing the changes when the current function or test finishes:

    Do / undo this withr function
    Create a file local_tempfile()
    Create a directory local_tempdir()
    Set an R option local_options()
    Set an environment variable local_envvar()
    Change working directory local_dir()

    (You can see a full list at https://withr.r-lib.org/ but these five are by far the most commonly used.)

    These allow you to control options that would otherwise be painful. For example, imagine you’re testing base R code that rounds numbers to a fixed number of places when printing. You could write code like this:

    test_that("print() respects digits option", {
      x <- 1.23456789
    
      withr::local_options(digits = 1)
      expect_equal(capture.output(x), "[1] 1")
    
      withr::local_options(digits = 5)
      expect_equal(capture.output(x), "[1] 1.2346")
    })
    #> Test passed with 2 successes 🎉.

    If you write a lot of code like this in your tests, you might decide you want a helper function or test fixture that reduces the duplication. Fortunately withr’s local functions allow us to solve this problem by providing an .local_envir or envir argument that controls when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there’s a common pattern you can use without understanding all the details. Your helper function should always have an env argument that defaults to parent.frame(), which you pass to the .local_envir argument of local_*():

    local_digits <- function(sig_digits, env = parent.frame()) {
      withr::local_options(digits = sig_digits, .local_envir = env)
    
      # mark that this function is called for its side-effects not its return value
      invisible() 
    }

    Foundations

    Before we go further, let’s lay some foundations to help you understand how local_ functions work. We’ll motivate the discussion with a sloppy() function that prints a number with a specific number of significant digits by adjusting an R option:

    sloppy <- function(x, sig_digits) {
      options(digits = sig_digits)
      print(x)
    }
    
    pi
    #> [1] 3.141593
    sloppy(pi, 2)
    #> [1] 3.1
    pi
    #> [1] 3.1

    Notice how pi prints differently before and after the call to sloppy(). Calling sloppy() has a side effect: it changes the digits option globally, not just within its own scope. This is what we want to avoid.¹

    on.exit()

    The first function you need to know about is base R’s on.exit(). on.exit() calls the code supplied to its first argument when the current function exits, regardless of whether it returns a value or throws an error. You can use on.exit() to clean up after yourself by ensuring that every mess-making function call is paired with an on.exit() call that cleans up.

    We can use this idea to turn sloppy() into neat():

    neat <- function(x, sig_digits) {
      op <- options(digits = sig_digits)
      on.exit(options(op), add = TRUE, after = FALSE)
      print(x)
    }
    
    pi
    #> [1] 3.141593
    neat(pi, 2)
    #> [1] 3.1
    pi
    #> [1] 3.141593

    Here we make use of a useful pattern that options() implements: when you call options(digits = sig_digits), it both sets the digits option and (invisibly) returns the previous value of digits. We can then use that value to restore the previous options.

    on.exit() also works in tests:

    test_that("can print one digit of pi", {
      op <- options(digits = 1)
      on.exit(options(op), add = TRUE, after = FALSE)
      
      expect_output(print(pi), "3")
    })
    #> Test passed with 1 success 😀.
    pi
    #> [1] 3.141593

    There are three main drawbacks to on.exit():

    • You should always call it with add = TRUE and after = FALSE. These ensure that the call is added to the list of deferred tasks (instead of replacing them) and is added to the front of the stack (not the back), so that cleanup occurs in reverse order to setup. These arguments only matter if you’re using multiple on.exit() calls, but it’s a good habit to always use them to avoid potential problems down the road.

    • It doesn’t work outside a function or test. If you run the following code in the global environment, you won’t get an error, but the cleanup code will never be run:

      op <- options(digits = 1)
      on.exit(options(op), add = TRUE, after = FALSE)

      This is annoying when you are running tests interactively.

    • You can’t program with it; on.exit() always works inside the current function, so you can’t wrap up repeated on.exit() code in a helper function.

    To resolve these drawbacks, we use withr::defer().

    withr::defer()

    withr::defer() resolves the main drawbacks of on.exit(). First, it has the behavior we want by default; no extra arguments needed:

    neat <- function(x, sig_digits) {
      op <- options(digits = sig_digits)
      withr::defer(options(op))
      print(x)
    }

    Second, it works when called in the global environment. Since the global environment isn’t perishable, like a test environment is, you have to call deferred_run() explicitly to execute the deferred events. You can also clear them, without running, with deferred_clear().

    withr::defer(print("hi"))
    #> Setting deferred event(s) on global environment.
    #>   * Execute (and clear) with `deferred_run()`.
    #>   * Clear (without executing) with `deferred_clear()`.
    
    withr::deferred_run()
    #> [1] "hi"

    Finally, withr::defer() lets you pick which function to bind the cleanup behavior to. This makes it possible to create helper functions.

    “Local” helpers

    Imagine we have many functions where we want to temporarily set the digits option. Wouldn’t it be nice if we could write a helper function to automate this? Unfortunately, we can’t write a helper with on.exit():

    local_digits <- function(sig_digits) {
      op <- options(digits = sig_digits)
      on.exit(options(op), add = TRUE, after = FALSE)
    }
    neater <- function(x, sig_digits) {
      local_digits(1)
      print(x)
    }
    neater(pi)
    #> [1] 3.141593

    This code doesn’t work because the cleanup happens too soon, when local_digits() exits, not when neater() finishes.

    Fortunately, withr::defer() allows us to solve this problem by providing an envir argument that allows you to control when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there’s a common pattern you can use without understanding all the details. Your helper function should always have an env argument that defaults to parent.frame(), which you pass to the second argument of defer():

    local_digits <- function(sig_digits, env = parent.frame()) {
      op <- options(digits = sig_digits)
      withr::defer(options(op), env)
    }
    
    neater(pi)
    #> [1] 3

    Just like on.exit() and defer(), our helper also works within tests:

    test_that("withr lets us write custom helpers for local state manipulation", {
      local_digits(1)
      expect_output(print(exp(1)), "3")
      
      local_digits(3)
      expect_output(print(exp(1)), "2.72")
    })
    #> Test passed with 2 successes 🥇.
    
    print(exp(1))
    #> [1] 2.718282

    We always call these helper functions local_*; “local” here refers to the fact that the state change persists only locally, for the lifetime of the associated function or test. Another reason we call them “local” is that you can also use the local() function if you want to scope their effect to a smaller part of the test:

    test_that("local_options() only affects a minimal amount of code", {
      withr::local_options(x = 1)
      expect_equal(getOption("x"), 1)
    
      local({
        withr::local_options(x = 2)
        expect_equal(getOption("x"), 2)
      })
    
      expect_equal(getOption("x"), 1)
    })
    #> Test passed with 3 successes 🥇.
    
    getOption("x")
    #> NULL

    Test fixtures

    Testing is often demonstrated with cute little tests and functions where all the inputs and expected results can be inlined. But in real packages, things aren’t always so simple, and functions often depend on global state. For example, take this variant on message() that only shows a message if the verbose option is TRUE. How would you test that setting the option does indeed silence the message?

    message2 <- function(...) {
      if (!isTRUE(getOption("verbose"))) {
        return()
      }
      message(...)
    }

    In some cases, it’s possible to make the global state an explicit argument to the function. For example, we could refactor message2() to make the verbosity an explicit argument:

    message3 <- function(..., verbose = getOption("verbose")) {
      if (!isTRUE(verbose)) {
        return()
      }
      message(...)
    }

    Making external state explicit is often worthwhile because it makes clearer exactly what inputs determine the outputs of your function. But it’s simply not possible in many cases. That’s where test fixtures come in: they allow you to temporarily change global state to test your function. Test fixture is a pre-existing term in the software engineering world (and beyond):

    A test fixture is something used to consistently test some item, device, or piece of software.

    Wikipedia

    A test fixture is just a local_* function that you use to change state in such a way that you can reach inside and test parts of your code that would otherwise be challenging. For example, here’s how you could use withr::local_options() as a test fixture to test message2():

    test_that("message2() output depends on verbose option", {
      withr::local_options(verbose = TRUE)
      expect_message(message2("Hi!"))
      
      withr::local_options(verbose = FALSE)
      expect_message(message2("Hi!"), NA)
    })
    #> Test passed with 2 successes 🎊.

    Case study: usethis

    One place that we use test fixtures extensively is in the usethis package (usethis.r-lib.org), which provides functions for looking after the files and folders in R projects, especially packages. Many of these functions only make sense in the context of a package, which means to test them, we also have to be working inside an R package. We need a way to quickly spin up a minimal package in a temporary directory, then test some functions against it, then destroy it.

    To solve this problem we create a test fixture, which we place in R/test-helpers.R so that it’s available for both testing and interactive experimentation:

    local_create_package <- function(dir = file_temp(), env = parent.frame()) {
      old_project <- proj_get_()
      
      # create new folder and package
      create_package(dir, open = FALSE) # A
      withr::defer(fs::dir_delete(dir), envir = env) # -A
      
      # change working directory
      withr::local_dir(dir, .local_envir = env) # B + -B
      
      # switch to new usethis project
      proj_set(dir) # C
      withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C
      
      dir
    }

    Note that the cleanup automatically unfolds in the opposite order from the setup. Setup is A, then B, then C; cleanup is -C, then -B, then -A. This is important because we must create directory dir before we can make it the working directory, and we must restore the original working directory before we can delete dir—we can’t delete dir while it’s still the working directory!

    local_create_package() is used in over 170 tests. Here’s one example that checks that usethis::use_roxygen_md() does the setup necessary to use roxygen2 in a package, with markdown support turned on. All 3 expectations consult the DESCRIPTION file, directly or indirectly. So it’s very convenient that local_create_package() creates a minimal package, with a valid DESCRIPTION file, for us to test against. And when the test is done — poof! — the package is gone.

    test_that("use_roxygen_md() adds DESCRIPTION fields", {
      pkg <- local_create_package()
      use_roxygen_md()
      
      expect_true(uses_roxygen_md())
      expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)")
      expect_true(desc::desc_has_fields("RoxygenNote", pkg))
    })

    Scope

    So far we have applied our test fixture to individual tests, but it’s also possible to apply them to a file or package.

    File

    If you move the local_*() call outside of a test_that() block, it will affect all tests that come after it. This means that by calling the test fixture at the top of the file, you can change the behavior for all tests. This has both advantages and disadvantages:

    • If you would otherwise have called the fixture in every test, you’ve saved yourself a bunch of work and duplicate code.

    • On the downside, if your test fails and you want to recreate the failure in an interactive environment so you can debug, you need to remember to run all the setup code at the top of the file first.

    Generally, I think it’s better to copy and paste test fixtures across many tests — sure, it adds some duplication to your code, but it makes debugging test failures so much easier.

    Package

    To run code before any test is run, you can create a file called tests/testthat/setup.R. If the code in this file needs clean up, you can use the special teardown_env():

    # Run before any test
    write.csv(mtcars, "mtcars.csv")
    
    # Run after all tests
    withr::defer(unlink("mtcars.csv"), teardown_env())

    Setup code is typically best used to create external resources that are needed by many tests. It’s best kept to a minimum because you will have to manually run it before interactively debugging tests.

    Other challenges

    A collection of miscellaneous problems that don’t fit elsewhere:

    • There are a few base functions that are hard to test because they depend on state that you can’t control. One such example is interactive(): there’s no way to write a test fixture that allows you to pretend that interactive is either TRUE or FALSE. So we now usually use rlang::is_interactive(), which can be controlled by the rlang_interactive option.

    • If you’re using a test fixture in a function, be careful about what you return. For example, if you write a function that does dir <- local_create_package(), you shouldn’t return dir, because after the function returns, the directory will no longer exist.


    1. Don’t worry, I’m restoring global state (specifically, the digits option) behind the scenes here.↩︎

    testthat/inst/doc/custom-expectation.Rmd0000644000176200001440000003024515111027202020126 0ustar liggesusers--- title: "Custom expectations" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Custom expectations} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup} #| include: false library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") ``` This vignette shows you how to write your own expectations. Custom expectations allow you to extend testthat to meet your own specialized testing needs, creating new `expect_*` functions that work exactly the same way as the built-ins. Custom expectations are particularly useful if you want to produce expectations tailored for domain-specific data structures, combine multiple checks into a single expectation, or create more actionable feedback when an expectation fails. You can use them within your package by putting them in a helper file, or share them with others by exporting them from your package. In this vignette, you'll learn about the three-part structure of expectations, how to test your custom expectations, see a few examples, and, if you're writing a lot of expectations, learn how to reduce repeated code. ## Do you need it? But before you read the rest of the vignette and dive into the full details of creating a 100% correct expectation, consider if you can get away with a simpler wrapper. If you're just customising an existing expectation by changing some defaults, you're fine: ```{r} expect_df <- function(tbl) { expect_s3_class(tbl, "data.frame") } ``` If you're combining multiple expectations, you can introduce a subtle problem. 
For example, take this expectation from tidytext: ```{r} # from tidytext expect_nrow <- function(tbl, n) { expect_s3_class(tbl, "data.frame") expect_equal(nrow(tbl), n) } ``` If we use it in a test you can see there's an issue: ```{r} #| error: true test_that("success", { expect_nrow(mtcars, 32) }) test_that("failure 1", { expect_nrow(mtcars, 30) }) test_that("failure 2", { expect_nrow(matrix(1:5), 2) }) ``` Each of these tests contain a single expectation, but they report a total of two successes and failures. It would be confusing if testthat didn't report these numbers correctly. But as a helper in your package, it's probably not a big deal. You might also notice that these failures generate a backtrace whereas built-in expectations don't. Again, it's not a big deal because the backtrace is correct, it's just not needed. These are both minor issues, so if they don't bother you, you can save yourself some pain by not reading this vignette 😀. ## Expectation basics An expectation has four main parts, as illustrated by `expect_length()`: ```{r} expect_length <- function(object, n) { # 1. Capture object and label act <- quasi_label(rlang::enquo(object)) act_n <- length(act$val) if (act_n != n) { # 2. Fail if expectations are violated fail(c( sprintf("Expected %s to have length %i.", act$lab, n), sprintf("Actual length: %i.", act_n) )) } else { # 3. Pass if expectations are met pass() } # 4. Invisibly return the input value invisible(act$val) } ``` The first step in any expectation is to use `quasi_label()` to capture a "labeled value", i.e., a list that contains both the value (`$val`) for testing and a label (`$lab`) used to make failure messages as informative as possible. This is a pattern that exists for fairly esoteric reasons; you don't need to understand it, just copy and paste it. Next you need to check each way that `object` could violate the expectation. In this case, there's only one check, but in more complicated cases there can be many. 
Note the specific form of the failure message: the first element describes what we expected, and then the second line reports what we actually saw. If the object is as expected, call `pass()`. This ensures that a success will be registered in the test reporter. Otherwise, call `fail()`. This ensures that a failure will be registered in the test reporter. NB: unlike `stop()` or `abort()`, `fail()` signals a failure but allows code execution to continue, ensuring that one failure does not prevent subsequent expectations from running. Finally, return the input value (`act$val`) invisibly. This is good practice because expectations are called primarily for their side-effects (triggering a failure), and returning the value allows expectations to be piped together: ```{r} #| label: piping test_that("mtcars is a 13 row data frame", { mtcars |> expect_type("list") |> expect_s3_class("data.frame") |> expect_length(11) }) ``` ### Testing your expectations Once you've written your expectation, you need to test it: expectations are functions that can have bugs, just like any other function, and it's really important that they generate actionable failure messages. Luckily testthat comes with three expectations designed specifically to test expectations: * `expect_success()` checks that your expectation emits exactly one success and zero failures. * `expect_failure()` checks that your expectation emits exactly one failure and zero successes. * `expect_snapshot_failure()` captures the failure message in a snapshot, making it easier to review whether it's useful. The first two expectations are particularly important because they ensure that your expectation always reports either a single success or a single failure. If it doesn't, the end user is going to get confusing results in their test suite reports. 
```{r} test_that("expect_length works as expected", { x <- 1:10 expect_success(expect_length(x, 10)) expect_failure(expect_length(x, 11)) }) test_that("expect_length gives useful feedback", { x <- 1:10 expect_snapshot_failure(expect_length(x, 11)) }) ``` ## Examples The following sections show you a few more variations, loosely based on existing testthat expectations. These expectations were picked to show how you can generate actionable failures in slightly more complex situations. ### `expect_vector_length()` Let's make `expect_length()` a bit more strict by also checking that the input is a vector. R is a bit unusual in that it gives a length to pretty much every object, and you can imagine not wanting code like the following to succeed, because it's likely that the user passed the wrong object to the test. ```{r} expect_length(mean, 1) ``` To do this we'll add an extra check that the input is either an atomic vector or a list: ```{r} expect_vector_length <- function(object, n) { act <- quasi_label(rlang::enquo(object)) # It's non-trivial to check if an object is a vector in base R so we # use an rlang helper if (!rlang::is_vector(act$val)) { fail(c( sprintf("Expected %s to be a vector", act$lab), sprintf("Actual type: %s", typeof(act$val)) )) } else { act_n <- length(act$val) if (act_n != n) { fail(c( sprintf("Expected %s to have length %i.", act$lab, n), sprintf("Actual length: %i.", act_n) )) } else { pass() } } invisible(act$val) } ``` ```{r} #| error: true expect_vector_length(mean, 1) expect_vector_length(mtcars, 15) ``` ### `expect_s3_class()` Or imagine you're checking to see if an object inherits from an S3 class. R has a lot of different OO systems, and you want your failure messages to be as informative as possible, so before checking that the class matches, you probably want to check that the object is from the correct OO family. 
```{r} expect_s3_class <- function(object, class) { if (!rlang::is_string(class)) { rlang::abort("`class` must be a string.") } act <- quasi_label(rlang::enquo(object)) if (!is.object(act$val)) { fail(sprintf("Expected %s to be an object.", act$lab)) } else if (isS4(act$val)) { fail(c( sprintf("Expected %s to be an S3 object.", act$lab), "Actual OO type: S4" )) } else if (!inherits(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, class), sprintf("Actual class: %s", class(act$val)) )) } else { pass() } invisible(act$val) } ``` ```{r} #| error: true x1 <- 1:10 TestClass <- methods::setClass("Test", contains = "integer") x2 <- TestClass() x3 <- factor() expect_s3_class(x1, "integer") expect_s3_class(x2, "integer") expect_s3_class(x3, "integer") expect_s3_class(x3, "factor") ``` Note the variety of error messages. We always print what was expected, and where possible, also display what was actually received: * When `object` isn't an object, we can only say what we expected. * When `object` is an S4 object, we can report that. * When `inherits()` is `FALSE`, we provide the actual class, since that's most informative. The general principle is to tailor error messages to what the user can act on based on what you know about the input. Also note that I check that the `class` argument is a string. If it's not a string, I throw an error. This is not a test failure; the user is calling the function incorrectly. In general, you should check the type of all arguments that affect the operation and error if they're not what you expect. ```{r} #| error: true expect_s3_class(x1, 1) ``` ### Optional `class` A common pattern in testthat's own expectations is to use arguments to control the level of detail in the test. Here it would be nice if we could check that an object is an S3 object without checking for a specific class. I think we could do that by renaming `expect_s3_class()` to `expect_s3_object()`. 
Now `expect_s3_object(x)` would verify that `x` is an S3 object, and `expect_s3_object(x, class = "foo")` would verify that `x` is an S3 object with the given class. The implementation of this is straightforward: we also allow `class` to be `NULL` and then only verify inheritance when non-`NULL`. ```{r} expect_s3_object <- function(object, class = NULL) { if (!is.null(class) && !rlang::is_string(class)) { rlang::abort("`class` must be a string or NULL.") } act <- quasi_label(rlang::enquo(object)) if (!is.object(act$val)) { fail(sprintf("Expected %s to be an object.", act$lab)) } else if (isS4(act$val)) { fail(c( sprintf("Expected %s to be an S3 object.", act$lab), "Actual OO type: S4" )) } else if (!is.null(class) && !inherits(act$val, class)) { fail(c( sprintf("Expected %s to inherit from %s.", act$lab, class), sprintf("Actual class: %s", class(act$val)) )) } else { pass() } invisible(act$val) } ``` ## Repeated code As you write more expectations, you might discover repeated code that you want to extract into a helper. Unfortunately, creating 100% correct helper functions is not straightforward in testthat because `fail()` captures the calling environment in order to give useful tracebacks, and testthat's own expectations don't expose this as an argument. Fortunately, getting this right is not critical (you'll just get a slightly suboptimal traceback in the case of failure), so we don't recommend bothering in most cases. We document it here, however, because it's important to get it right in testthat itself. The key challenge is that `fail()` captures a `trace_env`, which should be the execution environment of the expectation. This usually works because the default value of `trace_env` is `rlang::caller_env()`. 
But when you introduce a helper, you'll need to explicitly pass it along: ```{r} expect_length_ <- function(act, n, trace_env = caller_env()) { act_n <- length(act$val) if (act_n != n) { fail( sprintf("%s has length %i, not length %i.", act$lab, act_n, n), trace_env = trace_env ) } else { pass() } } expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object)) expect_length_(act, n) invisible(act$val) } ``` A few recommendations: * The helper shouldn't be user-facing, so we give it a `_` suffix to make that clear. * It's typically easiest for a helper to take the labeled value produced by `quasi_label()`. * Your helper should usually be called for its side effects (i.e. it calls `fail()` and `pass()`). * You should return `invisible(act$val)` from the parent expecatation as usual. Again, you're probably not writing so many expectations that it makes sense for you to go to this effort, but it is important for testthat to get it right. testthat/inst/doc/snapshotting.R0000644000176200001440000001260315130237650016504 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) set.seed(1014) ## ----setup-------------------------------------------------------------------- library(testthat) ## ----include = FALSE---------------------------------------------------------- snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") ## ----------------------------------------------------------------------------- bullets <- function(text, id = NULL) { paste0( "\n", paste0("
  • ", text, "
  • \n", collapse = ""), "\n" ) } cat(bullets("a", id = "x")) ## ----------------------------------------------------------------------------- test_that("bullets", { expect_equal(bullets("a"), "
      \n
    • a
    • \n
    \n") expect_equal(bullets("a", id = "x"), "
      \n
    • a
    • \n
    \n") }) ## ----------------------------------------------------------------------------- test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ## ----include = FALSE---------------------------------------------------------- # Reset snapshot test snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ## ----------------------------------------------------------------------------- test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) ## ----------------------------------------------------------------------------- # finalise snapshot to in order to get an error snapper$end_file() snapper$start_file("snapshotting.Rmd", "test") ## ----error = TRUE------------------------------------------------------------- try({ bullets <- function(text, id = NULL) { paste0( "\n", paste0("
  • ", text, "
  • \n", collapse = ""), "\n" ) } test_that("bullets", { expect_snapshot(cat(bullets("a"))) expect_snapshot(cat(bullets("a", "b"))) }) }) ## ----------------------------------------------------------------------------- try({ test_that("you can't add a number and a letter", { expect_snapshot(1 + "a") }) }) ## ----------------------------------------------------------------------------- test_that("you can't add a number and a letter", { expect_snapshot(1 + "a", error = TRUE) }) ## ----------------------------------------------------------------------------- test_that("you can't add weird things", { expect_snapshot(error = TRUE, { 1 + "a" mtcars + iris Sys.Date() + factor() }) }) ## ----------------------------------------------------------------------------- check_unnamed <- function(..., call = parent.frame()) { names <- ...names() has_name <- names != "" if (!any(has_name)) { return(invisible()) } named <- names[has_name] cli::cli_abort( c( "All elements of {.arg ...} must be unnamed.", i = "You supplied argument{?s} {.arg {named}}." 
), call = call ) } test_that("no errors if all arguments unnamed", { expect_no_error(check_unnamed()) expect_no_error(check_unnamed(1, 2, 3)) }) test_that("actionable feedback if some or all arguments named", { expect_snapshot(error = TRUE, { check_unnamed(x = 1, 2) check_unnamed(x = 1, y = 2) }) }) ## ----------------------------------------------------------------------------- safe_write_lines <- function(lines, path, overwrite = FALSE) { if (file.exists(path) && !overwrite) { cli::cli_abort(c( "{.path {path}} already exists.", i = "Set {.code overwrite = TRUE} to overwrite" )) } writeLines(lines, path) } ## ----------------------------------------------------------------------------- snapper$end_file() snapper$start_file("snapshotting.Rmd", "safe-write-lines") ## ----------------------------------------------------------------------------- test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot(safe_write_lines(letters, path), error = TRUE) }) ## ----------------------------------------------------------------------------- snapper$end_file() snapper$start_file("snapshotting.Rmd", "safe-write-lines") ## ----------------------------------------------------------------------------- try({ test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot(safe_write_lines(letters, path), error = TRUE) }) }) ## ----------------------------------------------------------------------------- snapper$end_file() snapper$start_file("snapshotting.Rmd", "test-2") ## ----------------------------------------------------------------------------- test_that("generates actionable error message", { path <- withr::local_tempfile(lines = "") expect_snapshot( safe_write_lines(letters, path), error = TRUE, transform = \(lines) gsub(path, "", lines, fixed = TRUE) ) }) ## ----------------------------------------------------------------------------- test_that("can snapshot a simple list", { x <- 
list(a = list(1, 5, 10), b = list("elephant", "banana")) expect_snapshot_value(x) }) ## ----------------------------------------------------------------------------- knitr::include_graphics("review-image.png") ## ----------------------------------------------------------------------------- knitr::include_graphics("review-text.png") testthat/inst/doc/third-edition.html0000644000176200001440000007310415130237654017300 0ustar liggesusers testthat 3e

    testthat 3e

    testthat 3.0.0 introduces the idea of an “edition” of testthat. An edition is a bundle of behaviours that you have to explicitly choose to use, allowing us to make otherwise backward incompatible changes. This is particularly important for testthat since it has a very large number of packages that use it (almost 5,000 at last count). Choosing to use the 3rd edition allows you to use our latest recommendations for ongoing and new work, while historical packages continue to use the old behaviour.

    (We don’t anticipate creating new editions very often, and they’ll always be matched with major version, i.e. if there’s another edition, it’ll be the fourth edition and will come with testthat 4.0.0.)

    This vignette shows you how to activate the 3rd edition, introduces the main features, and discusses common challenges when upgrading a package. If you have a problem that this vignette doesn’t cover, please let me know, as it’s likely that the problem also affects others.

    library(testthat)
    local_edition(3)

    Activating

    The usual way to activate the 3rd edition is to add a line to your DESCRIPTION:

    Config/testthat/edition: 3

    This will activate the 3rd edition for every test in your package.

    You can also control the edition used for individual tests with testthat::local_edition():

    test_that("I can use the 3rd edition", {
      local_edition(3)
      expect_true(TRUE)
    })
    #> Test passed with 1 success 🎊.

    This is also useful if you’ve switched to the 3rd edition and have a couple of tests that fail. You can use local_edition(2) to revert back to the old behaviour, giving you some breathing room to figure out the underlying issue.

    test_that("I want to use the 2nd edition", {
      local_edition(2)
      expect_true(TRUE)
    })
    #> Test passed with 1 success 🥇.

    Changes

    There are three major changes in the 3rd edition:

    • A number of outdated functions are now deprecated, so you’ll be warned about them every time you run your tests (but they won’t cause R CMD check to fail).

    • testthat no longer silently swallows messages; you now need to deliberately handle them.

    • expect_equal() and expect_identical() now use the waldo package instead of identical() and all.equal(). This makes them more consistent and provides an enhanced display of differences when a test fails.

    Deprecations

    A number of outdated functions have been deprecated. Most of these functions have not been recommended for a number of years, but before the introduction of the edition idea, I didn’t have a good way of preventing people from using them without breaking a lot of code on CRAN.

    • context() is formally deprecated. testthat has been moving away from context() in favour of file names for quite some time, and now you’ll be strongly encouraged to remove these calls from your tests.

    • expect_is() is deprecated in favour of the more specific expect_type(), expect_s3_class(), and expect_s4_class(). This ensures that you check the expected class along with the expected OO system.

    • The very old expect_that() syntax is now deprecated. This was an overly clever API that I regretted even before the release of testthat 1.0.0.

    • expect_equivalent() has been deprecated since it is now equivalent (HA HA) to expect_equal(ignore_attr = TRUE).

    • setup() and teardown() are deprecated in favour of test fixtures. See vignette("test-fixtures") for details.

    • expect_known_output(), expect_known_value(), expect_known_hash(), and expect_equal_to_reference() are all deprecated in favour of expect_snapshot_output() and expect_snapshot_value().

    • with_mock() and local_mock() are deprecated; please use with_mocked_bindings() or local_mocked_bindings() instead.

    Fixing these deprecation warnings should be straightforward.

    Warnings

    In the second edition, expect_warning() swallows all warnings regardless of whether or not they match the regexp or class:

    f <- function() {
      warning("First warning")
      warning("Second warning")
      warning("Third warning")
    }
    
    local_edition(2)
    expect_warning(f(), "First")

    In the third edition, expect_warning() captures at most one warning so the others will bubble up:

    local_edition(3)
    expect_warning(f(), "First")
    #> Warning in f(): Second warning
    #> Warning in f(): Third warning

    You can either add additional expectations to catch these warnings, or silence them all with suppressWarnings():

    f() |> 
      expect_warning("First") |> 
      expect_warning("Second") |> 
      expect_warning("Third")
    
    f() |> 
      expect_warning("First") |> 
      suppressWarnings()

    Alternatively, you might want to capture them all in a snapshot test:

    test_that("f() produces expected outputs/messages/warnings", {
      expect_snapshot(f())  
    })
    #> ── Snapshot ────────────────────────────────────────────────────────────────────
    #> ℹ Can't save or compare to reference when testing interactively.
    #> Code
    #>   f()
    #> Condition
    #>   Warning in `f()`:
    #>   First warning
    #>   Warning in `f()`:
    #>   Second warning
    #>   Warning in `f()`:
    #>   Third warning
    #> ────────────────────────────────────────────────────────────────────────────────
    #> ── Skip: f() produces expected outputs/messages/warnings ───────────────────────
    #> Reason: empty test

    The same principle also applies to expect_message(), but message handling has changed in a more radical way, as described next.

    Messages

    For reasons that I can no longer remember, testthat silently ignores all messages. This is inconsistent with other types of output, so as of the 3rd edition, they now bubble up to your test results. You’ll have to explicitly ignore them with suppressMessages(), or if they’re important, test for their presence with expect_message().

    waldo

    Probably the biggest day-to-day difference (and the biggest reason to upgrade!) is the use of waldo::compare() inside of expect_equal() and expect_identical(). The goal of waldo is to find and concisely describe the difference between a pair of R objects, and it’s designed specifically to help you figure out what’s gone wrong in your unit tests.

    f1 <- factor(letters[1:3])
    f2 <- ordered(letters[1:3], levels = letters[1:4])
    
    local_edition(2)
    expect_equal(f1, f2)
    #> Error: Expected `f1` to equal `f2`.
    #> Differences:
    #> Attributes: < Component "class": Lengths (1, 2) differ (string compare on first 1) >
    #> Attributes: < Component "class": 1 string mismatch >
    #> Attributes: < Component "levels": Lengths (3, 4) differ (string compare on first 3) >
    
    local_edition(3)
    expect_equal(f1, f2)
    #> Error: Expected `f1` to equal `f2`.
    #> Differences:
    #> `levels(actual)`:   "a" "b" "c"    
    #> `levels(expected)`: "a" "b" "c" "d"

    waldo looks even better in your console because it carefully uses colours to help highlight the differences.

    The use of waldo also makes precise the difference between expect_equal() and expect_identical(): expect_equal() sets tolerance so that waldo will ignore small numerical differences arising from floating point computation. Otherwise the functions are identical (HA HA).

    This change is likely to result in the most work during an upgrade, because waldo can give slightly different results to both identical() and all.equal() in moderately common situations. I believe on the whole the differences are meaningful and useful, so you’ll need to handle them by tweaking your tests. The following changes are most likely to affect you:

    • expect_equal() previously ignored the environments of formulas and functions. This is most likely to arise if you are testing models. It’s worth thinking about what the correct values should be, but if that is too annoying you can opt out of the comparison with ignore_function_env or ignore_formula_env.

    • expect_equal() used a combination of all.equal() and a home-grown testthat::compare() which unfortunately used a slightly different definition of tolerance. Now expect_equal() always uses the same definition of tolerance everywhere, which may require tweaks to your existing tolerance values.

    • expect_equal() previously ignored timezone differences when one object had the current timezone set implicitly (with "") and the other had it set explicitly:

      dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0)
      attr(dt1, "tzone") <- ""
      attr(dt2, "tzone") <- Sys.timezone()
      
      local_edition(2)
      expect_equal(dt1, dt2)
      
      local_edition(3)
      expect_equal(dt1, dt2)
      #> Error: Expected `dt1` to equal `dt2`.
      #> Differences:
      #> `attr(actual, 'tzone')`:   ""               
      #> `attr(expected, 'tzone')`: "America/Chicago"

    Reproducible output

    In the third edition, test_that() automatically calls local_reproducible_output() which automatically sets a number of options and environment variables to ensure output is as reproducible across systems. This includes setting:

    • options(crayon.enabled = FALSE) and options(cli.unicode = FALSE) so that the crayon and cli packages produce raw ASCII output.

    • Sys.setlocale("LC_COLLATE", "C") so that sorting a character vector returns the same order regardless of the system language.

    • options(width = 80) so print methods always generate the same output regardless of your actual console width.

    See the documentation for more details.

    Upgrading

    The changes lend themselves to the following workflow for upgrading from the 2nd to the 3rd edition:

    1. Activate edition 3. You can let usethis::use_testthat(3) do this for you.
    2. Remove or replace deprecated functions, going over the list of above.
    3. If your output got noisy, quiet things down by either capturing or suppressing warnings and messages.
    4. Inspect test outputs if objects are not “all equal” anymore.

    Alternatives

    You might wonder why we came up with the idea of an “edition”, rather than creating a new package like testthat3. We decided against making a new package because the 2nd and 3rd edition share a very large amount of code, so making a new package would have substantially increased the maintenance burden: the majority of bugs would’ve needed to be fixed in two places.

    If you’re a programmer in other languages, you might wonder why we can’t rely on semantic versioning. The main reason is that CRAN checks all packages that use testthat with the latest version of testthat, so simply incrementing the major version number doesn’t actually help with reducing R CMD check failures on CRAN.

    testthat/inst/doc/test-fixtures.Rmd0000644000176200001440000004031715070045757017145 0ustar liggesusers--- title: "Test fixtures" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Test fixtures} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ## Test hygiene > Take nothing but memories, leave nothing but footprints. > > ― Chief Si'ahl Ideally, a test should leave the world exactly as it found it. But you often need to make changes to exercise every part of your code: - Create a file or directory - Create a resource on an external system - Set an R option - Set an environment variable - Change working directory - Change an aspect of the tested package's state How can you clean up these changes to get back to a clean slate? Scrupulous attention to cleanup is more than just courtesy or being fastidious. It's also self-serving. The state of the world after test `i` is the starting state for test `i + 1`. Tests that change state willy-nilly eventually end up interfering with each other in ways that can be very difficult to debug. Most tests are written with an implicit assumption about the starting state, usually whatever *tabula rasa* means for the target domain of your package. If you accumulate enough sloppy tests, you will eventually find yourself asking the programming equivalent of questions like "Who forgot to turn off the oven?" and "Who didn't clean up after the dog?" (If you've got yourself into this state, testthat provides another tool to help you figure out exactly which test is to blame: `set_state_inspector()`.) It's also important that your setup and cleanup are easy to use when working interactively. When a test fails, you want to be able to quickly recreate the exact environment in which the test is run so you can interactively experiment to figure out what went wrong. 
This article introduces a powerful technique that allows you to solve both problems: **test fixtures**. We'll begin by discussing some canned tools, then learn about the underlying theory, discuss exactly what a test fixture is, and finish with a few examples. ```{r} library(testthat) ``` ## `local_` helpers We'll begin by giving you the minimal knowledge needed to change global state *just* within your test. The withr package provides a number of functions that temporarily change the state of the world, carefully undoing the changes when the current function or test finishes: | Do / undo this | withr function | |-----------------------------|-------------------| | Create a file | `local_tempfile()`| | Create a directory | `local_tempdir()` | | Set an R option | `local_options()` | | Set an environment variable | `local_envvar()` | | Change working directory | `local_dir()` | (You can see a full list at but these five are by far the most commonly used.) These allow you to control options that would otherwise be painful. For example, imagine you're testing base R code that rounds numbers to a fixed number of places when printing. You could write code like this: ```{r} test_that("print() respects digits option", { x <- 1.23456789 withr::local_options(digits = 1) expect_equal(capture.output(x), "[1] 1") withr::local_options(digits = 5) expect_equal(capture.output(x), "[1] 1.2346") }) ``` If you write a lot of code like this in your tests, you might decide you want a helper function or **test fixture** that reduces the duplication. Fortunately withr's local functions allow us to solve this problem by providing an `.local_envir` or `envir` argument that controls when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there's a common pattern you can use without understanding all the details. 
Your helper function should always have an `env` argument that defaults to `parent.frame()`, which you pass to the `.local_envir` argument of `local_()`: ```{r} local_digits <- function(sig_digits, env = parent.frame()) { withr::local_options(digits = sig_digits, .local_envir = env) # mark that this function is called for its side-effects not its return value invisible() } ``` ## Foundations Before we go further, let's lay some foundations to help you understand how `local_` functions work. We'll motivate the discussion with a `sloppy()` function that prints a number with a specific number of significant digits by adjusting an R option: ```{r include = FALSE} op <- options() ``` ```{r} sloppy <- function(x, sig_digits) { options(digits = sig_digits) print(x) } pi sloppy(pi, 2) pi ``` ```{r include = FALSE} options(op) ``` Notice how `pi` prints differently before and after the call to `sloppy()`. Calling `sloppy()` has a side effect: it changes the `digits` option globally, not just within its own scope. This is what we want to avoid[^1]. [^1]: Don't worry, I'm restoring global state (specifically, the `digits` option) behind the scenes here. ### `on.exit()` The first function you need to know about is base R's `on.exit()`. `on.exit()` calls the code supplied to its first argument when the current function exits, regardless of whether it returns a value or throws an error. You can use `on.exit()` to clean up after yourself by ensuring that every mess-making function call is paired with an `on.exit()` call that cleans up. We can use this idea to turn `sloppy()` into `neat()`: ```{r} neat <- function(x, sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) print(x) } pi neat(pi, 2) pi ``` Here we make use of a useful pattern that `options()` implements: when you call `options(digits = sig_digits)`, it both sets the `digits` option *and* (invisibly) returns the previous value of digits. 
We can then use that value to restore the previous options. `on.exit()` also works in tests: ```{r} test_that("can print one digit of pi", { op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) expect_output(print(pi), "3") }) pi ``` There are three main drawbacks to `on.exit()`: - You should always call it with `add = TRUE` and `after = FALSE`. These ensure that the call is **added** to the list of deferred tasks (instead of replacing them) and is added to the **front** of the stack (not the back), so that cleanup occurs in reverse order to setup. These arguments only matter if you're using multiple `on.exit()` calls, but it's a good habit to always use them to avoid potential problems down the road. - It doesn't work outside a function or test. If you run the following code in the global environment, you won't get an error, but the cleanup code will never be run: ```{r, eval = FALSE} op <- options(digits = 1) on.exit(options(op), add = TRUE, after = FALSE) ``` This is annoying when you are running tests interactively. - You can't program with it; `on.exit()` always works inside the *current* function, so you can't wrap up repeated `on.exit()` code in a helper function. To resolve these drawbacks, we use `withr::defer()`. ### `withr::defer()` `withr::defer()` resolves the main drawbacks of `on.exit()`. First, it has the behavior we want by default; no extra arguments needed: ```{r} neat <- function(x, sig_digits) { op <- options(digits = sig_digits) withr::defer(options(op)) print(x) } ``` Second, it works when called in the global environment. Since the global environment isn't perishable, like a test environment is, you have to call `deferred_run()` explicitly to execute the deferred events. You can also clear them, without running, with `deferred_clear()`. ```{r, eval = FALSE} withr::defer(print("hi")) #> Setting deferred event(s) on global environment. #> * Execute (and clear) with `deferred_run()`. 
#> * Clear (without executing) with `deferred_clear()`. withr::deferred_run() #> [1] "hi" ``` Finally, `withr::defer()` lets you pick which function to bind the cleanup behavior to. This makes it possible to create helper functions. ### "Local" helpers Imagine we have many functions where we want to temporarily set the digits option. Wouldn't it be nice if we could write a helper function to automate this? Unfortunately, we can't write a helper with `on.exit()`: ```{r} local_digits <- function(sig_digits) { op <- options(digits = sig_digits) on.exit(options(op), add = TRUE, after = FALSE) } neater <- function(x, sig_digits) { local_digits(1) print(x) } neater(pi) ``` This code doesn't work because the cleanup happens too soon, when `local_digits()` exits, not when `neater()` finishes. Fortunately, `withr::defer()` allows us to solve this problem by providing an `envir` argument that allows you to control when cleanup occurs. The exact details of how this works are rather complicated, but fortunately there's a common pattern you can use without understanding all the details. Your helper function should always have an `env` argument that defaults to `parent.frame()`, which you pass to the second argument of `defer()`: ```{r} local_digits <- function(sig_digits, env = parent.frame()) { op <- options(digits = sig_digits) withr::defer(options(op), env) } neater(pi) ``` Just like `on.exit()` and `defer()`, our helper also works within tests: ```{r} test_that("withr lets us write custom helpers for local state manipulation", { local_digits(1) expect_output(print(exp(1)), "3") local_digits(3) expect_output(print(exp(1)), "2.72") }) print(exp(1)) ``` We always call these helper functions `local_*`; "local" here refers to the fact that the state change persists only locally, for the lifetime of the associated function or test. 
Another reason we call them "local" is that you can also use the `local()` function if you want to scope their effect to a smaller part of the test: ```{r} test_that("local_options() only affects a minimal amount of code", { withr::local_options(x = 1) expect_equal(getOption("x"), 1) local({ withr::local_options(x = 2) expect_equal(getOption("x"), 2) }) expect_equal(getOption("x"), 1) }) getOption("x") ``` ## Test fixtures Testing is often demonstrated with cute little tests and functions where all the inputs and expected results can be inlined. But in real packages, things aren't always so simple, and functions often depend on global state. For example, take this variant on `message()` that only shows a message if the `verbose` option is `TRUE`. How would you test that setting the option does indeed silence the message? ```{r} message2 <- function(...) { if (!isTRUE(getOption("verbose"))) { return() } message(...) } ``` In some cases, it's possible to make the global state an explicit argument to the function. For example, we could refactor `message2()` to make the verbosity an explicit argument: ```{r} message3 <- function(..., verbose = getOption("verbose")) { if (!isTRUE(verbose)) { return() } message(...) } ``` Making external state explicit is often worthwhile because it makes clearer exactly what inputs determine the outputs of your function. But it's simply not possible in many cases. That's where test fixtures come in: they allow you to temporarily change global state to test your function. Test fixture is a pre-existing term in the software engineering world (and beyond): > A test fixture is something used to consistently test some item, device, or piece of software. > > --- [Wikipedia](https://en.wikipedia.org/wiki/Test_fixture) A **test fixture** is just a `local_*` function that you use to change state in such a way that you can reach inside and test parts of your code that would otherwise be challenging. 
For example, here's how you could use `withr::local_options()` as a test fixture to test `message2()`: ```{r} test_that("message2() output depends on verbose option", { withr::local_options(verbose = TRUE) expect_message(message2("Hi!")) withr::local_options(verbose = FALSE) expect_message(message2("Hi!"), NA) }) ``` ### Case study: usethis One place that we use test fixtures extensively is in the usethis package ([usethis.r-lib.org](https://usethis.r-lib.org)), which provides functions for looking after the files and folders in R projects, especially packages. Many of these functions only make sense in the context of a package, which means to test them, we also have to be working inside an R package. We need a way to quickly spin up a minimal package in a temporary directory, then test some functions against it, then destroy it. To solve this problem we create a test fixture, which we place in `R/test-helpers.R` so that it's available for both testing and interactive experimentation: ```{r, eval = FALSE} local_create_package <- function(dir = file_temp(), env = parent.frame()) { old_project <- proj_get_() # create new folder and package create_package(dir, open = FALSE) # A withr::defer(fs::dir_delete(dir), envir = env) # -A # change working directory withr::local_dir(dir, .local_envir = env) # B + -B # switch to new usethis project proj_set(dir) # C withr::defer(proj_set(old_project, force = TRUE), envir = env) # -C dir } ``` Note that the cleanup automatically unfolds in the opposite order from the setup. Setup is `A`, then `B`, then `C`; cleanup is `-C`, then `-B`, then `-A`. This is important because we must create directory `dir` before we can make it the working directory, and we must restore the original working directory before we can delete `dir`—we can't delete `dir` while it's still the working directory! `local_create_package()` is used in over 170 tests. 
Here's one example that checks that `usethis::use_roxygen_md()` does the setup necessary to use roxygen2 in a package, with markdown support turned on. All 3 expectations consult the DESCRIPTION file, directly or indirectly. So it's very convenient that `local_create_package()` creates a minimal package, with a valid `DESCRIPTION` file, for us to test against. And when the test is done --- poof! --- the package is gone. ```{r eval = FALSE} test_that("use_roxygen_md() adds DESCRIPTION fields", { pkg <- local_create_package() use_roxygen_md() expect_true(uses_roxygen_md()) expect_equal(desc::desc_get("Roxygen", pkg)[[1]], "list(markdown = TRUE)") expect_true(desc::desc_has_fields("RoxygenNote", pkg)) }) ``` ## Scope So far we have applied our test fixture to individual tests, but it's also possible to apply them to a file or package. ### File If you move the `local_*()` call outside of a `test_that()` block, it will affect all tests that come after it. This means that by calling the test fixture at the top of the file, you can change the behavior for all tests. This has both advantages and disadvantages: - If you would otherwise have called the fixture in every test, you've saved yourself a bunch of work and duplicate code. - On the downside, if your test fails and you want to recreate the failure in an interactive environment so you can debug, you need to remember to run all the setup code at the top of the file first. Generally, I think it's better to copy and paste test fixtures across many tests --- sure, it adds some duplication to your code, but it makes debugging test failures so much easier. ### Package To run code before any test is run, you can create a file called `tests/testthat/setup.R`. 
If the code in this file needs clean up, you can use the special `teardown_env()`: ```{r, eval = FALSE} # Run before any test write.csv(mtcars, "mtcars.csv") # Run after all tests withr::defer(unlink("mtcars.csv"), teardown_env()) ``` Setup code is typically best used to create external resources that are needed by many tests. It's best kept to a minimum because you will have to manually run it before interactively debugging tests. ## Other challenges A collection of miscellaneous problems that don't fit elsewhere: - There are a few base functions that are hard to test because they depend on state that you can't control. One such example is `interactive()`: there's no way to write a test fixture that allows you to pretend that interactive is either `TRUE` or `FALSE`. So we now usually use `rlang::is_interactive()`, which can be controlled by the `rlang_interactive` option. - If you're using a test fixture in a function, be careful about what you return. For example, if you write a function that does `dir <- create_local_package()`, you shouldn't return `dir`, because after the function returns, the directory will no longer exist. testthat/inst/doc/parallel.Rmd0000644000176200001440000002061115056337671016111 0ustar liggesusers--- title: "Running tests in parallel" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Running tests in parallel} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} editor_options: markdown: wrap: sentence --- ```{r setup, include = FALSE} library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") ``` ## Setup To enable parallel testing, you must first be using the 3rd edition[^1]. Then add the following line to the `DESCRIPTION`: [^1]: See `vignette("third-edition")` for details. ``` Config/testthat/parallel: true ``` If needed (for example, for debugging) you can temporarily suppress parallel testing with `Sys.setenv(TESTTHAT_PARALLEL = "false")`. 
By default, testthat will use `getOption("Ncpus", 2)` cores. To increase that value for your development machine we recommend setting `TESTTHAT_CPUS` in your `.Renviron`. The easiest way to do that is call `usethis::edit_r_environ()` and then add something like the following: ``` TESTTHAT_CPUS=4 ``` Tests are run in alphabetical order by default, but you can often improve performance by starting the slowest tests first. Specify these tests by supplying a comma separated list of glob patterns[^2] to the `Config/testthat/start-first` field in your `DESCRIPTION`, e.g.: [^2]: See `?utils::glob2rx` for details ``` Config/testthat/start-first: watcher, parallel* ``` ## Basic operation Each worker begins by loading testthat and the package being tested. It then runs any setup files (so if you have existing setup files you'll need to make sure they work when executed in parallel). testthat runs test *files* in parallel. Once the worker pool is initialized, testthat then starts sending test files to workers, by default in alphabetical order: as soon as a subprocess has finished, it receives another file, until all files are done. This means that state is persisted across test files: options are *not* reset, loaded packages are *not* unloaded, the global environment is *not* cleared, etc. You are responsible for making sure each file leaves the world as it finds it. ## Common problems - If tests fail stochastically (i.e. they sometimes work and sometimes fail) you may have accidentally introduced a dependency between your test files. This sort of dependency is hard to track down due to the random nature, and you'll need to check all tests to make sure that they're not accidentally changing global state. `set_state_inspector()` will make this easier. - If you use [packaged scope test fixtures](https://testthat.r-lib.org/articles/test-fixtures.html#package), you'll need to review them to make sure that they work in parallel. 
For example, if you were previously creating a temporary database in the test directory, you'd need to instead create it in the session temporary directory so that each process gets its own independent version. ## Performance There is some overhead associated with running tests in parallel: - Startup cost is linear in the number of subprocesses, because we need to create them in a loop. This is about 50ms on my laptop. Each subprocess needs to load testthat and the tested package, this happens in parallel, and we cannot do too much about it. - Clean up time is again linear in the number of subprocesses, and it about 80ms per subprocess on my laptop. - It seems that sending a message (i.e. a passing or failing expectation) is about 2ms currently. This is the total cost that includes sending the message, receiving it, and replying it to a non-parallel reporter. This overhead generally means that if you have many test files that take a short amount of time, you're unlikely to see a huge benefit by using parallel tests. For example, testthat itself takes about 10s to run tests in serial, and 8s to run the tests in parallel. ## Reporters ### Default reporters See `default_reporter()` for how testthat selects the default reporter for `devtools::test()` and `testthat::test_local()`. In short, testthat selects `ProgressReporter` for non-parallel and `ParallelProgressReporter` for parallel tests by default. (Other testthat test functions, like `test_check()`, `test_file()` , etc. select different reporters by default.) ### Parallel support Most reporters support parallel tests. If a reporter is passed to `devtools::test()`, `testthat::test_dir()`, etc. directly, and it does not support parallel tests, then testthat runs the test files sequentially. Currently the following reporters *don't* support parallel tests: - `DebugReporter`, because it is not currently possible to debug subprocesses. 
- `JunitReporter`, because this reporter records timing information for each test block, and this is currently only available for reporters that support multiple active test files. (See "Writing parallel reporters" below.) - `LocationReporter` because testthat currently does not include location information for successful tests when running in parallel, to minimize messaging between the processes. - `StopReporter`, as this is a reporter that testthat uses for interactive `expect_that()` calls. The other built-in reporters all support parallel tests, with some subtle differences: - Reporters that stop after a certain number of failures can only stop at the end of a test file. - Reporters report all information about a file at once, unless they support *parallel updates*. E.g. `ProgressReporter` does not update its display until a test file is complete. - The standard output and standard error, i.e. `print()`, `cat()`, `message()`, etc. output from the test files are lost currently. If you want to use `cat()` or `message()` for print-debugging test cases, then the best is to temporarily run tests sequentially, by changing the `Config` entry in `DESCRIPTION` or setting `Sys.setenv(TESTTHAT_PARALLEL = "false")`. ### Writing parallel reporters To support parallel tests, a reporter must be able to function when the test files run in a subprocess. For example `DebugReporter` does not support parallel tests, because it requires direct interaction with the frames in the subprocess. When running in parallel, testthat does not provide location information (source references) for test successes. To support parallel tests, a reporter must set `self$capabilities$parallel_support` to `TRUE` in its `initialize()` method: ``` r ... initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE ... } ... 
``` When running in parallel, testthat runs the reporter in the main process, and relays information between the reporter and the test code transparently. (Currently the reporter does not even know that the tests are running in parallel.) If a reporter does not support parallel updates (see below), then testthat internally caches all calls to the reporter methods from subprocesses, until a test file is complete. This is because these reporters are not prepared for running multiple test files concurrently. Once a test file is complete, testthat calls the reporter's `$start_file()` method, relays all `$start_test()` , `$end_test()`, `$add_result()`, etc. calls in the order they came in from the subprocess, and calls `$end_file()` . ### Parallel updates The `ParallelProgressReporter` supports parallel updates. This means that once a message from a subprocess comes in, the reporter is updated immediately. For this to work, a reporter must be able to handle multiple test files concurrently. A reporter declares parallel update support by setting `self$capabilities$parallel_updates` to `TRUE`: ``` r ... initialize = function(...) { super$initialize(...) self$capabilities$parallel_support <- TRUE self$capabilities$parallel_updates <- TRUE ... } ... ``` For these reporters, testthat does not cache the messages from the subprocesses. Instead, when a message comes in: - It calls the `$start_file()` method, letting the reporter know which file the following calls apply to. This means that the reporter can receive multiple `$start_file()` calls for the same file. - Then relays the message from the subprocess, calling the appropriate `$start_test()` , `$add_result()`, etc. method. testthat also calls the new `$update()` method of the reporter regularly, even if it does not receive any messages from the subprocess. (Currently aims to do this every 100ms, but there are no guarantees.) The `$update()` method may implement a spinner to let the user know that the tests are running. 
testthat/inst/doc/challenging-tests.Rmd0000644000176200001440000001776315072252215017732 0ustar liggesusers--- title: "Testing challenging functions" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Testing challenging functions} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r} #| include: false library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") # Pretend we're testing testthat so we can use mocking Sys.setenv(TESTTHAT_PKG = "testthat") ``` This vignette is a quick reference guide for testing challenging functions. It's organized by problem type rather than technique, so you can quickly skim the whole vignette, spot the problem you're facing, and then learn more about useful tools for solving it. In it, you'll learn how to overcome the following challenges: * Functions with implicit inputs, like options and environment variables. * Random number generators. * Tests that can't be run in some environments. * Testing web APIs. * Testing graphical output. * User interaction. * User-facing text. * Repeated code. ## Options and environment variables If your function depends on options or environment variables, first try refactoring the function to make the [inputs explicit](https://design.tidyverse.org/inputs-explicit.html). If that's not possible, use functions like `withr::local_options()` or `withr::local_envvar()` to temporarily change options and environment values within a test. Learn more in `vignette("test-fixtures")`. ## Random numbers What happens if you want to test a function that relies on randomness in some way? If you're writing a random number generator, you probably want to generate a large quantity of random numbers and then apply some statistical test. But what if your function just happens to use a little bit of pre-existing randomness? 
How do you make your tests repeatable and reproducible? Under the hood, random number generators generate different numbers because they update a special `.Random.seed` variable stored in the global environment. You can temporarily set this seed to a known value to make your random numbers reproducible with `withr::local_seed()`, making random numbers a special case of test fixtures (`vignette("test-fixtures")`). Here's a simple example showing how you might test the basic operation of a function that rolls a die: ```{r} #| label: random-local-seed dice <- function() { sample(6, 1) } test_that("dice returns different numbers", { withr::local_seed(1234) expect_equal(dice(), 4) expect_equal(dice(), 2) expect_equal(dice(), 6) }) ``` Alternatively, you might want to mock (`vignette("mocking")`) the function to eliminate randomness. ```{r} #| label: random-mock roll_three <- function() { sum(dice(), dice(), dice()) } test_that("three dice adds values of individual calls", { local_mocked_bindings(dice = mock_output_sequence(1, 2, 3)) expect_equal(roll_three(), 6) }) ``` When should you set the seed and when should you use mocking? As a general rule of thumb, set the seed when you want to test the actual random behavior, and use mocking when you want to test the logic that uses the random results. ## Some tests can't be run in some circumstances You can skip a test without it passing or failing if you can't or don't want to run it (e.g., it's OS dependent, it only works interactively, or it shouldn't be tested on CRAN). Learn more in `vignette("skipping")`. ## HTTP requests If you're trying to test functions that rely on HTTP requests, we recommend using {vcr} or {httptest2}. These packages both allow you to interactively record HTTP responses and then later replay them in tests. This is a specialized type of mocking (`vignette("mocking")`) that works with {httr} and {httr2} to isolates your tests from failures in the underlying API. 
If your package is going to CRAN, you **must** either use one of these packages or use `skip_on_cran()` for all internet-facing tests. Otherwise, you are at high risk of failing `R CMD check` if the underlying API is temporarily down. This sort of failure causes extra work for the CRAN maintainers and extra hassle for you. ## Graphics The only type of testing you can use for graphics is snapshot testing (`vignette("snapshotting")`) via `expect_snapshot_file()`. Graphical snapshot testing is surprisingly challenging because you need pixel-perfect rendering across multiple versions of multiple operating systems, and this is hard, mostly due to imperceptible differences in font rendering. Fortunately we've needed to overcome these challenges in order to test {ggplot2}, and you can benefit from our experience by using {vdiffr} when testing graphical output. ## User interaction If you're testing a function that relies on user feedback (e.g. from `readline()`, `utils::menu()`, or `utils::askYesNo()`), you can use mocking (`vignette("mocking")`) to return fixed values within the test. For example, imagine that you've written the following function that asks the user if they want to continue: ```{r} #| label: continue continue <- function(prompt) { cat(prompt, "\n", sep = "") repeat { val <- readline("Do you want to continue? (y/n) ") if (val %in% c("y", "n")) { return(val == "y") } cat("! 
You must enter y or n\n") } } readline <- NULL ``` You could test its behavior by mocking `readline()` and using a snapshot test: ```{r} #| label: mock-readline test_that("user must respond y or n", { mock_readline <- local({ i <- 0 function(prompt) { i <<- i + 1 cat(prompt) val <- if (i == 1) "x" else "y" cat(val, "\n", sep = "") val } }) local_mocked_bindings(readline = mock_readline) expect_snapshot(val <- continue("This is dangerous")) expect_true(val) }) ``` If you don't care about reproducing the output of `continue()` and just want to recreate its return value, you can use `mock_output_sequence()`. This creates a function that returns the input supplied to `mock_output_sequence()` in sequence: the first input at the first call, the second input at the second call, etc. The following code shows how it works and how you might use it to test `readline()`: ```{r} f <- mock_output_sequence(1, 12, 123) f() f() f() ``` And ```{r} test_that("user must respond y or n", { local_mocked_bindings(readline = mock_output_sequence("x", "y")) expect_true(continue("This is dangerous")) }) ``` If you were testing the behavior of some function that uses `continue()`, you might want to mock `continue()` instead of `readline()`. For example, the function below requires user confirmation before overwriting an existing file. In order to focus our tests on the behavior of just this function, we mock `continue()` to return either `TRUE` or `FALSE` without any user messaging. ```{r} #| label: mock-continue save_file <- function(path, data) { if (file.exists(path)) { if (!continue("`path` already exists")) { stop("Failed to continue") } } writeLines(data, path) } test_that("save_file() requires confirmation to overwrite file", { path <- withr::local_tempfile(lines = letters) local_mocked_bindings(continue = function(...) TRUE) save_file(path, "a") expect_equal(readLines(path), "a") local_mocked_bindings(continue = function(...) 
FALSE) expect_snapshot(save_file(path, "a"), error = TRUE) }) ``` ## User-facing text Errors, warnings, and other user-facing text should be tested to ensure they're both actionable and consistent across the package. Obviously, it's not possible to test this automatically, but you can use snapshots (`vignette("snapshotting")`) to ensure that user-facing messages are clearly shown in PRs and easily reviewed by another human. ## Repeated code If you find yourself repeating the same set of expectations again and again across your test suite, it may be a sign that you should design your own expectation. Learn how in `vignette("custom-expectation")`. testthat/inst/doc/skipping.R0000644000176200001440000000314615130237646015616 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----setup-------------------------------------------------------------------- library(testthat) ## ----------------------------------------------------------------------------- # # Only run test if a token file is available # skip_if_not(file.exists("secure-token.json")) # # # Only run test if R has memory profiling capabilities # skip_if_not(capabilities("profmem")) # # # Only run if we've opted-in to slow tests with an env var # skip_if(Sys.getenv("RUN_SLOW_TESTS") == "true") ## ----------------------------------------------------------------------------- skip_if_dangerous <- function() { if (!identical(Sys.getenv("DANGER"), "")) { skip("Not run in dangerous environments.") } else { invisible() } } ## ----eval = FALSE------------------------------------------------------------- # convert_markdown_to_html <- function(in_path, out_path, ...) 
{ # if (rmarkdown::pandoc_available("2.0")) { # from <- "markdown+gfm_auto_identifiers-citations+emoji+autolink_bare_uris" # } else if (rmarkdown::pandoc_available("1.12.3")) { # from <- "markdown_github-hard_line_breaks+tex_math_dollars+tex_math_single_backslash+header_attributes" # } else { # if (is_testing()) { # testthat::skip("Pandoc not available") # } else { # abort("Pandoc not available") # } # } # # ... # } ## ----------------------------------------------------------------------------- is_testing <- function() { identical(Sys.getenv("TESTTHAT"), "true") } testthat/inst/doc/challenging-tests.R0000644000176200001440000000521315130237641017375 0ustar liggesusers## ----------------------------------------------------------------------------- library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") # Pretend we're testing testthat so we can use mocking Sys.setenv(TESTTHAT_PKG = "testthat") ## ----------------------------------------------------------------------------- dice <- function() { sample(6, 1) } test_that("dice returns different numbers", { withr::local_seed(1234) expect_equal(dice(), 4) expect_equal(dice(), 2) expect_equal(dice(), 6) }) ## ----------------------------------------------------------------------------- roll_three <- function() { sum(dice(), dice(), dice()) } test_that("three dice adds values of individual calls", { local_mocked_bindings(dice = mock_output_sequence(1, 2, 3)) expect_equal(roll_three(), 6) }) ## ----------------------------------------------------------------------------- continue <- function(prompt) { cat(prompt, "\n", sep = "") repeat { val <- readline("Do you want to continue? (y/n) ") if (val %in% c("y", "n")) { return(val == "y") } cat("! 
You must enter y or n\n") } } readline <- NULL ## ----------------------------------------------------------------------------- test_that("user must respond y or n", { mock_readline <- local({ i <- 0 function(prompt) { i <<- i + 1 cat(prompt) val <- if (i == 1) "x" else "y" cat(val, "\n", sep = "") val } }) local_mocked_bindings(readline = mock_readline) expect_snapshot(val <- continue("This is dangerous")) expect_true(val) }) ## ----------------------------------------------------------------------------- f <- mock_output_sequence(1, 12, 123) f() f() f() ## ----------------------------------------------------------------------------- test_that("user must respond y or n", { local_mocked_bindings(readline = mock_output_sequence("x", "y")) expect_true(continue("This is dangerous")) }) ## ----------------------------------------------------------------------------- save_file <- function(path, data) { if (file.exists(path)) { if (!continue("`path` already exists")) { stop("Failed to continue") } } writeLines(data, path) } test_that("save_file() requires confirmation to overwrite file", { path <- withr::local_tempfile(lines = letters) local_mocked_bindings(continue = function(...) TRUE) save_file(path, "a") expect_equal(readLines(path), "a") local_mocked_bindings(continue = function(...) FALSE) expect_snapshot(save_file(path, "a"), error = TRUE) }) testthat/inst/doc/special-files.Rmd0000644000176200001440000001077215056336252017036 0ustar liggesusers--- title: "Special files" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Special files} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` ```{r setup} #| include: false library(testthat) ``` This vignette describes the various special files that testthat understands: test, helper, setup/teardown, snapshot, and everything else. ## Test files These are the bread and butter of testthat. 
Test files live in `tests/testthat/`, start with either `test-` or `test_`, and end with `.r` or `.R`. We recommend organising your test files so that there's a one-to-one correspondence between the files in `R/` and the files in `tests/testthat/` so that (e.g.) `R/myfile.R` has a matching `tests/testthat/test-myfile.R`. This correspondence is maintained by functions like `usethis::use_r()` and `usethis::use_test()` and is taken advantage of by functions like `devtools::test_active_file()` and `devtools::test_coverage_active_file()`. Test files are executed in alphabetical order, but you should strive to avoid dependencies between test files. In principle, you should be able to be run your test files in any order or even at the same time. ## Helper files Helper files live in `tests/testthat/`, start with `helper`, and end with `.r` or `.R`. They are sourced by `devtools::load_all()` (so they're available interactively when developing your packages) and by `test_check()` and friends (so that they're available no matter how your tests are executed). Helper files are a useful place for functions that you've extracted from repeated code in your tests, whether that be test fixtures (`vignette("test-fixtures")`), custom expectations (`vignette("custom-expectation")`), or skip helpers (`vignette("skipping")`). ## Setup files Setup files live in `tests/testthat/`, start with `setup`, and end with `.r` or `.R`. Typically there is only one setup file which, by convention, is `tests/testthat/setup.R`. Setup files are sourced by `test_check()` and friends (so that they're available no matter how your tests are executed), but they are *not* sourced by `devtools::load_all()`. Setup files are good place to put truly global test setup that would be impractical to build into every single test and that might be tailored for test execution in non-interactive or remote environments. 
Examples: - Turning off behaviour aimed at an interactive user, such as messaging or writing to the clipboard. - Setting up a cache folder. If any of your setup should be reversed after test execution (i.e. it needs to be torn down), we recommend maintaining that teardown code alongside the setup code, in `setup.R`, because this makes it easier to ensure they stay in sync. The artificial environment `teardown_env()` exists as a magical handle to use in `withr::defer()` and `withr::local_*()`. A legacy approach (which still works, but is no longer recommended) is to put teardown code in `tests/testthat/teardown.R`. Here's a `setup.R` example from the reprex package, where we turn off clipboard and HTML preview functionality during testing: ```{r eval = FALSE} op <- options(reprex.clipboard = FALSE, reprex.html_preview = FALSE) withr::defer(options(op), teardown_env()) ``` Since we are just modifying options here, we can be even more concise and use the pre-built function `withr::local_options()` and pass `teardown_env()` as the `.local_envir`: ```{r eval = FALSE} withr::local_options( list(reprex.clipboard = FALSE, reprex.html_preview = FALSE), .local_envir = teardown_env() ) ``` ### Teardown files Teardown files live in `tests/testhat/`, start with `teardown` and end with `.r` or `.R`. They are executed after the tests are run, but we no longer recommend using them as it's easier to check that you clean up every mess that you make if you interleave setup and tear down code as described above. ## Snapshot files Snapshot files live in `tests/testthat/_snaps/`. Snapshot file are named automatically based on the name of the test file so that `tests/testthat/test-one.R` will generated `tests/testthat/_snaps/one.md`. Learn more about snapshot tests in `vignette("snapshotting")`. ## Other files and folders Other files and folders in `tests/testthat/` are ignored by testthat, making them a good place to store persistent test data. 
Since the precise location of the `test/testthat/` directory varies slightly depending on how you're running the test, we recommend creating paths to these files and directories using `test_path()`. testthat/inst/doc/mocking.Rmd0000644000176200001440000003124415067547665015760 0ustar liggesusers--- title: "Mocking" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Mocking} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r} #| include: false library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") # Pretend we're testing testthat so we can use mocking Sys.setenv(TESTTHAT_PKG = "testthat") ``` Mocking allows you to temporarily replace the implementation of a function with something that makes it easier to test. It's useful when testing failure scenarios that are hard to generate organically (e.g., what happens if dependency X isn't installed?), making tests more reliable, and making tests faster. It's also a general escape hatch to resolve almost any challenging testing problem. That said, mocking comes with downsides too: it's an advanced technique that can lead to brittle tests or tests that silently conceal problems. You should only use it when all other approaches fail. (If, like me, you're confused as to why you'd want to cruelly make fun of your tests, mocking here is used in the sense of making a fake or simulated version of something, i.e., a mock-up.) testthat's primary mocking tool is `local_mocked_bindings()` which is used to mock functions and is the focus of this vignette. But it also provides other tools for specialized cases: you can use `local_mocked_s3_method()` to mock an S3 method, `local_mocked_s4_method()` to mock an S4 method, and `local_mocked_r6_class()` to mock an R6 class. 
Once you understand the basic idea of mocking, it should be straightforward to apply these other tools where needed. In this vignette, we'll start by illustrating the basics of mocking with a few examples, continue to some real-world case studies from throughout the tidyverse, then finish up with the technical details so you can understand the tradeoffs of the current implementation. ## Getting started with mocking Let's begin by motivating mocking with a simple example. Imagine you're writing a function like `rlang::check_installed()`. The goal of this function is to check if a package is installed, and if not, give a nice error message. It also takes an optional `min_version` argument that you can use to enforce a version constraint. A simple base R implementation might look something like this: ```{r} check_installed <- function(pkg, min_version = NULL) { if (!requireNamespace(pkg, quietly = TRUE)) { stop(sprintf("{%s} is not installed.", pkg)) } if (!is.null(min_version)) { pkg_version <- packageVersion(pkg) if (pkg_version < min_version) { stop(sprintf( "{%s} version %s is installed, but %s is required.", pkg, pkg_version, min_version )) } } invisible() } ``` Now that we've written this function, we want to test it. There are many ways we might tackle this, but it's reasonable to start by testing the case where we don't specify a minimum version. To do this, we need to come up with a package we know is installed and a package we know isn't installed: ```{r} test_that("check_installed() checks package is installed", { expect_no_error(check_installed("testthat")) expect_snapshot(check_installed("doesntexist"), error = TRUE) }) ``` This is probably fine as we certainly know that testthat must be installed but it feels a little fragile as it depends on external state that we don't control. While it's pretty unlikely, if someone does create a `doesntexist` package, this test will no longer work. 
As a general principle, the less your tests rely on state outside of your control, the more robust and reliable they'll be. Next we want to check the case where we specify a minimum version, and again we need to make up some inputs: ```{r} test_that("check_installed() checks minimum version", { expect_no_error(check_installed("testthat", "1.0.0")) expect_snapshot(check_installed("testthat", "99.99.999"), error = TRUE) }) ``` Again, this is probably safe (since I'm unlikely to release 90+ new versions of testthat), but if you look at the snapshot message carefully, you'll notice that it includes the current version of testthat. That means every time a new version of testthat is released, we'll have to update the snapshot. We could use the `transform` argument to fix this: ```{r} test_that("check_installed() checks minimum version", { expect_no_error(check_installed("testthat", "1.0.0")) expect_snapshot( check_installed("testthat", "99.99.999"), error = TRUE, transform = function(lines) gsub(packageVersion("testthat"), "", lines) ) }) ``` But it's starting to feel like we're accumulating more and more hacks. So let's take a fresh look and see how mocking might help us. The basic idea of mocking is to temporarily replace the implementation of functions being used by the function we're testing. Here we're testing `check_installed()` and want to mock `requireNamespace()` and `packageVersion()` so we can control their versions. There's a small wrinkle here in that `requireNamespace` and `packageVersion` are base functions, not our functions, so we need to make bindings in our package namespace so we can mock them (we'll come back to why later). ```{r} requireNamespace <- NULL packageVersion <- NULL ``` For the first test, we mock `requireNamespace()` twice: first to always return `TRUE` (pretending every package is installed), and then to always return `FALSE` (pretending that no packages are installed).
Now the test is completely self-contained and doesn't depend on what packages happen to be installed. ```{r} test_that("check_installed() checks package is installed", { local_mocked_bindings(requireNamespace = function(...) TRUE) expect_no_error(check_installed("package-name")) local_mocked_bindings(requireNamespace = function(...) FALSE) expect_snapshot(check_installed("package-name"), error = TRUE) }) ``` For the second test, we mock `requireNamespace()` to return `TRUE`, and then `packageVersion()` to always return version 2.0.0. This again ensures our test is independent of system state. ```{r} test_that("check_installed() checks minimum version", { local_mocked_bindings( requireNamespace = function(...) TRUE, packageVersion = function(...) numeric_version("2.0.0") ) expect_no_error(check_installed("package-name", "1.0.0")) expect_snapshot(check_installed("package-name", "3.4.5"), error = TRUE) }) ``` ## Case studies To give you more experience with mocking, this section looks at a few places where we use mocking in the tidyverse: * Testing `testthat::skip_on_os()` regardless of what operating system is running the test. * Speeding up `usethis::use_release_issue()`. * Testing the passage of time in `httr2::req_throttle()`. These situations are all a little complex, as this is the nature of mocking: if you can use a simpler technique, you should. Mocking is only needed for otherwise intractable problems. ### Pretending we're on a different platform ```{r} #| include: false system_os <- NULL ``` `testthat::skip_on_os()` allows you to skip tests on specific operating systems, using the internal `system_os()` function which is a thin wrapper around `Sys.info()[["sysname"]]`. To test that this skip works correctly, we have to use mocking because there's no other way to pretend we're running on a different operating system. 
This yields the following test, where we use mocking to pretend that we're always on Windows: ```{r} #| eval: false test_that("can skip on multiple oses", { local_mocked_bindings(system_os = function() "windows") expect_skip(skip_on_os("windows")) expect_skip(skip_on_os(c("windows", "linux"))) expect_no_skip(skip_on_os("linux")) }) ``` (The logic of `skip_on_os()` is simple enough that I feel confident we only need to simulate one platform.) ### Speeding up tests `usethis::use_release_issue()` creates a GitHub issue with a bulleted list of actions to follow when releasing a package. But some of the bullets depend on complex conditions that can take a while to compute. So the [tests for this function](https://github.com/r-lib/usethis/blob/main/tests/testthat/test-release.R) use mocks like this: ```{r} #| eval: false local_mocked_bindings( get_revdeps = function() character(), gh_milestone_number = function(...) NA ) ``` Here we pretend that there are no reverse dependencies (revdeps) for the package, which is both slow to compute and will vary over time if we use a real package. We also pretend that there are no related GitHub milestones, which otherwise requires a GitHub API call, which is again slow and might vary over time. Together, these mocks keep the tests fast and self-contained, free from any state outside of our direct control. ### Managing time `httr2::req_throttle()` prevents multiple requests from being made too quickly, using a technique called a leaky token bucket. This technique is inextricably tied to real time because you want to allow more requests as time elapses. So how do you test this? I started by using `Sys.sleep()`, but this made my tests both slow (because I'd sleep for a second or two) and unreliable (because sometimes more time elapsed than I expected).
Eventually I figured out that I could "manually control" time by using a [mocked function](https://github.com/r-lib/httr2/blob/main/tests/testthat/test-req-throttle.R) that returns the value of a variable I control. This allows me to manually advance time and carefully test the implications. You can see the basic idea with a simpler example. Let's first begin with a function that returns the "unix time", the number of seconds elapsed since midnight on Jan 1, 1970. This is easy to compute, but will make some computations simpler later as well as providing a convenient function to mock. ```{r} unix_time <- function() unclass(Sys.time()) unix_time() ``` Now I'm going to create a function factory that makes it easy to compute how much time has elapsed since some fixed starting point: ```{r} elapsed <- function() { start <- unix_time() function() { unix_time() - start } } timer <- elapsed() Sys.sleep(0.5) timer() ``` Imagine trying to test this function without mocking! You'd probably think it's not worth it. In fact, that's what I thought originally, but I soon learned my lesson because I introduced a bug because I'd forgotten the complexities of computing the difference between two POSIXct values. With mocking, however, I can "manipulate time" by mocking `unix_time()` so that it returns the value of a variable I control. Now I can write a reliable test: ```{r} test_that("elapsed() measures elapsed time", { time <- 1 local_mocked_bindings(unix_time = function() time) timer <- elapsed() expect_equal(timer(), 0) time <- 2 expect_equal(timer(), 1) }) ``` ## How does mocking work? To finish up, it's worth discussing how mocking works. The fundamental challenge of mocking is that you want it to be "hygienic", i.e. it should only affect the operation of your package code, not all running code. You can see why this might be problematic if you imagine mocking a function that testthat itself uses: you don't want to accidentally break testthat while trying to test your code!
To achieve this goal, `local_mocked_bindings()` works by modifying your package's [namespace environment](https://adv-r.hadley.nz/environments.html#special-environments). You can implement the basic idea using base R code like this: ```{r} #| eval: false old <- getFromNamespace("my_function", "mypackage") assignInNamespace("my_function", new, "mypackage") # run the test... # restore the previous value assignInNamespace("my_function", old, "mypackage") ``` This implementation leads to two limitations of `local_mocked_bindings()`: 1. The package namespace is locked, which means that you can't add new bindings to it. That means if you want to mock base functions, you have to provide some binding that can be overridden. The easiest way to do this is with something like `mean <- NULL`. This creates a binding that `local_mocked_bindings()` can modify, but because of R's [lexical scoping rules](https://adv-r.hadley.nz/functions.html#functions-versus-variables) doesn't affect ordinary calls. 2. `::` doesn't use the package namespace, so if you want to mock an explicitly namespaced function, you either have import `fun` into your `NAMESPACE` (e.g., with `@importFrom pkg fun`) or create your own wrapper function that you can mock. Typically, one of these options will feel fairly natural. Overall, these limitations feel correct to me: `local_mocked_bindings()` makes it easy to temporarily change the implementation of functions that you have written, while offering workarounds to override the implementations of functions that others have written in the scope of your package. testthat/inst/doc/parallel.R0000644000176200001440000000023315130237645015557 0ustar liggesusers## ----setup, include = FALSE--------------------------------------------------- library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") testthat/inst/doc/parallel.html0000644000176200001440000004713515130237645016336 0ustar liggesusers Running tests in parallel

    Running tests in parallel

    Setup

    To enable parallel testing, you must first be using the 3rd edition1. Then add the following line to the DESCRIPTION:

    Config/testthat/parallel: true

    If needed (for example, for debugging) you can temporarily suppress parallel testing with Sys.setenv(TESTTHAT_PARALLEL = "false").

    By default, testthat will use getOption("Ncpus", 2) cores. To increase that value for your development machine we recommend setting TESTTHAT_CPUS in your .Renviron. The easiest way to do that is call usethis::edit_r_environ() and then add something like the following:

    TESTTHAT_CPUS=4

    Tests are run in alphabetical order by default, but you can often improve performance by starting the slowest tests first. Specify these tests by supplying a comma separated list of glob patterns2 to the Config/testthat/start-first field in your DESCRIPTION, e.g.:

    Config/testthat/start-first: watcher, parallel*

    Basic operation

    Each worker begins by loading testthat and the package being tested. It then runs any setup files (so if you have existing setup files you’ll need to make sure they work when executed in parallel).

    testthat runs test files in parallel. Once the worker pool is initialized, testthat then starts sending test files to workers, by default in alphabetical order: as soon as a subprocess has finished, it receives another file, until all files are done. This means that state is persisted across test files: options are not reset, loaded packages are not unloaded, the global environment is not cleared, etc. You are responsible for making sure each file leaves the world as it finds it.

    Common problems

    • If tests fail stochastically (i.e. they sometimes work and sometimes fail) you may have accidentally introduced a dependency between your test files. This sort of dependency is hard to track down due to the random nature, and you’ll need to check all tests to make sure that they’re not accidentally changing global state. set_state_inspector() will make this easier.

    • If you use package-scoped test fixtures, you’ll need to review them to make sure that they work in parallel. For example, if you were previously creating a temporary database in the test directory, you’d need to instead create it in the session temporary directory so that each process gets its own independent version.

    Performance

    There is some overhead associated with running tests in parallel:

    • Startup cost is linear in the number of subprocesses, because we need to create them in a loop. This is about 50ms on my laptop. Each subprocess needs to load testthat and the tested package, this happens in parallel, and we cannot do too much about it.

    • Clean up time is again linear in the number of subprocesses, and it about 80ms per subprocess on my laptop.

    • It seems that sending a message (i.e. a passing or failing expectation) is about 2ms currently. This is the total cost that includes sending the message, receiving it, and relaying it to a non-parallel reporter.

    This overhead generally means that if you have many test files that take a short amount of time, you’re unlikely to see a huge benefit by using parallel tests. For example, testthat itself takes about 10s to run tests in serial, and 8s to run the tests in parallel.

    Reporters

    Default reporters

    See default_reporter() for how testthat selects the default reporter for devtools::test() and testthat::test_local(). In short, testthat selects ProgressReporter for non-parallel and ParallelProgressReporter for parallel tests by default. (Other testthat test functions, like test_check(), test_file(), etc. select different reporters by default.)

    Parallel support

    Most reporters support parallel tests. If a reporter is passed to devtools::test(), testthat::test_dir(), etc. directly, and it does not support parallel tests, then testthat runs the test files sequentially.

    Currently the following reporters don’t support parallel tests:

    • DebugReporter, because it is not currently possible to debug subprocesses.

    • JunitReporter, because this reporter records timing information for each test block, and this is currently only available for reporters that support multiple active test files. (See “Writing parallel reporters” below.)

    • LocationReporter, because testthat currently does not include location information for successful tests when running in parallel, to minimize messaging between the processes.

    • StopReporter, as this is a reporter that testthat uses for interactive expect_that() calls.

    The other built-in reporters all support parallel tests, with some subtle differences:

    • Reporters that stop after a certain number of failures can only stop at the end of a test file.

    • Reporters report all information about a file at once, unless they support parallel updates. E.g. ProgressReporter does not update its display until a test file is complete.

    • The standard output and standard error, i.e. print(), cat(), message(), etc. output from the test files are lost currently. If you want to use cat() or message() for print-debugging test cases, then the best is to temporarily run tests sequentially, by changing the Config entry in DESCRIPTION or setting Sys.setenv(TESTTHAT_PARALLEL = "false").

    Writing parallel reporters

    To support parallel tests, a reporter must be able to function when the test files run in a subprocess. For example DebugReporter does not support parallel tests, because it requires direct interaction with the frames in the subprocess. When running in parallel, testthat does not provide location information (source references) for test successes.

    To support parallel tests, a reporter must set self$capabilities$parallel_support to TRUE in its initialize() method:

    ...
    initialize = function(...) {
      super$initialize(...)
      self$capabilities$parallel_support <- TRUE
      ...
    }
    ...

    When running in parallel, testthat runs the reporter in the main process, and relays information between the reporter and the test code transparently. (Currently the reporter does not even know that the tests are running in parallel.)

    If a reporter does not support parallel updates (see below), then testthat internally caches all calls to the reporter methods from subprocesses, until a test file is complete. This is because these reporters are not prepared for running multiple test files concurrently. Once a test file is complete, testthat calls the reporter’s $start_file() method, relays all $start_test(), $end_test(), $add_result(), etc. calls in the order they came in from the subprocess, and calls $end_file().

    Parallel updates

    The ParallelProgressReporter supports parallel updates. This means that once a message from a subprocess comes in, the reporter is updated immediately. For this to work, a reporter must be able to handle multiple test files concurrently.

    A reporter declares parallel update support by setting self$capabilities$parallel_updates to TRUE:

    ...
    initialize = function(...) {
      super$initialize(...)
      self$capabilities$parallel_support <- TRUE
      self$capabilities$parallel_updates <- TRUE
      ...
    }
    ...

    For these reporters, testthat does not cache the messages from the subprocesses. Instead, when a message comes in:

    • It calls the $start_file() method, letting the reporter know which file the following calls apply to. This means that the reporter can receive multiple $start_file() calls for the same file.

    • Then relays the message from the subprocess, calling the appropriate $start_test(), $add_result(), etc. method.

    testthat also calls the new $update() method of the reporter regularly, even if it does not receive any messages from the subprocess. (It currently aims to do this every 100ms, but there are no guarantees.) The $update() method may implement a spinner to let the user know that the tests are running.


    1. See vignette("third-edition") for details.↩︎

    2. See ?utils::glob2rx for details↩︎

    testthat/inst/doc/snapshotting.html0000644000176200001440000101114715130237650017252 0ustar liggesusers Snapshot tests

    Snapshot tests

    The goal of a unit test is to record the expected output of a function using code. This is a powerful technique because it not only ensures that code doesn’t change unexpectedly, but it also expresses the desired behavior in a way that a human can understand.

    However, it’s not always convenient to record the expected behavior with code. Some challenges include:

    • Text output that includes many characters like quotes and newlines that require special handling in a string.

    • Output that is large, making it painful to define the reference output and bloating the size of the test file.

    • Binary formats like plots or images, which are very difficult to describe in code: e.g., the plot looks right, the error message is actionable, or the print method uses color effectively.

    For these situations, testthat provides an alternative mechanism: snapshot tests. Instead of using code to describe expected output, snapshot tests (also known as golden tests) record results in a separate human-readable file. Snapshot tests in testthat are inspired primarily by Jest, thanks to a number of very useful discussions with Joe Cheng.

    library(testthat)

    Basic workflow

    We’ll illustrate the basic workflow with a simple function that generates HTML bullets. It can optionally include an id attribute, which allows you to construct a link directly to that list.

    bullets <- function(text, id = NULL) {
      paste0(
        "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", 
        paste0("  <li>", text, "</li>\n", collapse = ""),
        "</ul>\n"
      )
    }
    cat(bullets("a", id = "x"))
    #> <ul id="x">
    #>   <li>a</li>
    #> </ul>

    Testing this simple function is relatively painful. To write the test you have to carefully escape the newlines and quotes. And then when you re-read the test in the future, all that escaping makes it hard to tell exactly what it’s supposed to return.

    test_that("bullets", {
      expect_equal(bullets("a"), "<ul>\n  <li>a</li>\n</ul>\n")
      expect_equal(bullets("a", id = "x"), "<ul id=\"x\">\n  <li>a</li>\n</ul>\n")
    })
    #> Test passed with 2 successes 🥇.

    This is a great place to use snapshot testing. To do this we make two changes to our code:

    • We use expect_snapshot() instead of expect_equal()

    • We wrap the call in cat() (to avoid [1] in the output, like in the first interactive example above).

    This yields the following test:

    test_that("bullets", {
      expect_snapshot(cat(bullets("a")))
      expect_snapshot(cat(bullets("a", "b")))
    })
    #> ── Warning: bullets ────────────────────────────────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   cat(bullets("a"))
    #> Output
    #>   <ul>
    #>     <li>a</li>
    #>   </ul>
    #> ── Warning: bullets ────────────────────────────────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   cat(bullets("a", "b"))
    #> Output
    #>   <ul id="b">
    #>     <li>a</li>
    #>   </ul>
    #> Test passed with 2 successes 🎊.

    When we run the test for the first time, it automatically generates reference output and prints it, so that you can visually confirm that it’s correct. The output is automatically saved in _snaps/{name}.md. The name of the snapshot matches your test file name — e.g. if your test is test-pizza.R then your snapshot will be saved in tests/testthat/_snaps/pizza.md. As the file name suggests, this is a markdown file, which I’ll explain shortly.

    If you run the test again, it’ll succeed:

    test_that("bullets", {
      expect_snapshot(cat(bullets("a")))
      expect_snapshot(cat(bullets("a", "b")))
    })
    #> Test passed with 2 successes 🌈.

    But if you change the underlying code, say to tweak the indenting, the test will fail:

    bullets <- function(text, id = NULL) {
      paste0(
        "<ul", if (!is.null(id)) paste0(" id=\"", id, "\""), ">\n", 
        paste0("<li>", text, "</li>\n", collapse = ""),
        "</ul>\n"
      )
    }
    test_that("bullets", {
      expect_snapshot(cat(bullets("a")))
      expect_snapshot(cat(bullets("a", "b")))
    })
    #> ── Failure: bullets ────────────────────────────────────────────────────────────
    #> Snapshot of code has changed:
    #>     old                 | new                    
    #> [2]   cat(bullets("a")) |   cat(bullets("a")) [2]
    #> [3] Output              | Output              [3]
    #> [4]   <ul>              |   <ul>              [4]
    #> [5]     <li>a</li>      -   <li>a</li>        [5]
    #> [6]   </ul>             |   </ul>             [6]
    #> * Run `testthat::snapshot_accept("snapshotting.Rmd")` to accept the change.
    #> * Run `testthat::snapshot_review("snapshotting.Rmd")` to review the change.
    #> ── Failure: bullets ────────────────────────────────────────────────────────────
    #> Snapshot of code has changed:
    #>     old                      | new                         
    #> [2]   cat(bullets("a", "b")) |   cat(bullets("a", "b")) [2]
    #> [3] Output                   | Output                   [3]
    #> [4]   <ul id="b">            |   <ul id="b">            [4]
    #> [5]     <li>a</li>           -   <li>a</li>             [5]
    #> [6]   </ul>                  |   </ul>                  [6]
    #> * Run `testthat::snapshot_accept("snapshotting.Rmd")` to accept the change.
    #> * Run `testthat::snapshot_review("snapshotting.Rmd")` to review the change.
    #> Error:
    #> ! Test failed with 2 failures and 0 successes.

    If this is a deliberate change, you can follow the advice in the message and update the snapshots for that file by running snapshot_accept("pizza"); otherwise, you can fix the bug and your tests will pass once more. (You can also accept snapshots for all files with snapshot_accept().)

    If you delete the test, the corresponding snapshot will be removed the next time you run the tests. If you delete all snapshots in the file, the entire snapshot file will be deleted the next time you run all the tests.

    Snapshot format

    Snapshots are recorded using a subset of markdown. You might wonder why we use markdown. We use it because it’s important that snapshots be human-readable because humans have to read them during code reviews. Reviewers often don’t run your code but still want to understand the changes.

    Here’s the snapshot file generated by the test above:

    # bullets
    
        <ul>
          <li>a</li>
        </ul>
      
    ---
    
        <ul id="x">
          <li>a</li>
        </ul>

    Each test starts with # {test name}, a level 1 heading. Within a test, each snapshot expectation is indented by four spaces, i.e., as code, and they are separated by ---, a horizontal rule.

    Interactive usage

    Because the snapshot output uses the name of the current test file and the current test, snapshot expectations don’t really work when run interactively at the console. Since they can’t automatically find the reference output, they instead just print the current value for manual inspection.

    Testing errors

    So far we’ve focused on snapshot tests for output printed to the console. But expect_snapshot() also captures messages, errors, and warnings[^1]. Messages and warnings are straightforward, but capturing errors is slightly more difficult because expect_snapshot() will fail if there’s an error:

    test_that("you can't add a number and a letter", {
      expect_snapshot(1 + "a")
    })
    #> ── Error: you can't add a number and a letter ──────────────────────────────────
    #> Error in `1 + "a"`: non-numeric argument to binary operator
    #> Error:
    #> ! Test failed with 1 failure and 0 successes.

    This is a safety valve that ensures that you don’t accidentally write broken code. To deliberately snapshot an error, you’ll have to specifically request it with error = TRUE:

    test_that("you can't add a number and a letter", {
      expect_snapshot(1 + "a", error = TRUE)
    })
    #> ── Warning: you can't add a number and a letter ────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   1 + "a"
    #> Condition
    #>   Error in `1 + "a"`:
    #>   ! non-numeric argument to binary operator
    #> Test passed with 1 success 🎊.

    When the code gets longer, I like to put error = TRUE up front so it’s a little more obvious:

    test_that("you can't add weird things", {
      expect_snapshot(error = TRUE, {
        1 + "a"
        mtcars + iris
        Sys.Date() + factor()
      })
    })
    #> ── Warning: you can't add weird things ─────────────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   1 + "a"
    #> Condition
    #>   Error in `1 + "a"`:
    #>   ! non-numeric argument to binary operator
    #> Code
    #>   mtcars + iris
    #> Condition
    #>   Error in `Ops.data.frame()`:
    #>   ! '+' only defined for equally-sized data frames
    #> Code
    #>   Sys.Date() + factor()
    #> Condition
    #>   Warning:
    #>   Incompatible methods ("+.Date", "Ops.factor") for "+"
    #> Output
    #>   numeric(0)
    #> Test passed with 1 success 🥇.

    Just be careful: when you set error = TRUE, expect_snapshot() checks that at least one expression throws an error, not that every expression throws an error. For example, look above and notice that adding a date and a factor generated a warning, not an error.

    Snapshot tests are particularly important when testing complex error messages, such as those that you might generate with cli. Here’s a more realistic example illustrating how you might test check_unnamed(), a function that ensures all arguments in ... are unnamed.

    check_unnamed <- function(..., call = parent.frame()) {
      names <- ...names()
      has_name <- names != ""
      if (!any(has_name)) {
        return(invisible())
      }
    
      named <- names[has_name]
      cli::cli_abort(
        c(
          "All elements of {.arg ...} must be unnamed.",
          i = "You supplied argument{?s} {.arg {named}}."
        ), 
        call = call
      )
    }
    
    test_that("no errors if all arguments unnamed", {
      expect_no_error(check_unnamed())
      expect_no_error(check_unnamed(1, 2, 3))
    })
    #> Test passed with 2 successes 🎉.
    
    test_that("actionable feedback if some or all arguments named", {
      expect_snapshot(error = TRUE, {
        check_unnamed(x = 1, 2)
        check_unnamed(x = 1, y = 2)
      })
    })
    #> ── Warning: actionable feedback if some or all arguments named ─────────────────
    #> Adding new snapshot:
    #> Code
    #>   check_unnamed(x = 1, 2)
    #> Condition
    #>   Error:
    #>   ! All elements of `...` must be unnamed.
    #>   i You supplied argument `x`.
    #> Code
    #>   check_unnamed(x = 1, y = 2)
    #> Condition
    #>   Error:
    #>   ! All elements of `...` must be unnamed.
    #>   i You supplied arguments `x` and `y`.
    #> Test passed with 1 success 🎊.

    Other challenges

    Varying outputs

    Sometimes part of the output varies in ways that you can’t easily control. In many cases, it’s convenient to use mocking (vignette("mocking")) to ensure that every run of the function always produces the same output. In other cases, it’s easier to manipulate the text output with a regular expression or similar. That’s the job of the transform argument, which should be passed a function that takes a character vector of lines and returns a modified vector.

    This type of problem often crops up when you are testing a function that gives feedback about a path. In your tests, you’ll typically use a temporary path (e.g., from withr::local_tempfile()), so if you display the path in a snapshot, it will be different every time. For example, consider this “safe” version of writeLines() that requires you to explicitly opt in to overwriting an existing file:

    safe_write_lines <- function(lines, path, overwrite = FALSE) {
      if (file.exists(path) && !overwrite) {
        cli::cli_abort(c(
          "{.path {path}} already exists.", 
          i = "Set {.code overwrite = TRUE} to overwrite"
        ))
      }
    
      writeLines(lines, path)
    }

    If you use a snapshot test to confirm that the error message is useful, the snapshot will be different every time the test is run:

    test_that("generates actionable error message", {
      path <- withr::local_tempfile(lines = "")
      expect_snapshot(safe_write_lines(letters, path), error = TRUE)
    })
    #> ── Warning: generates actionable error message ─────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   safe_write_lines(letters, path)
    #> Condition
    #>   Error in `safe_write_lines()`:
    #>   ! '/tmp/RtmpazfsKv/file928010855f6' already exists.
    #>   i Set `overwrite = TRUE` to overwrite
    #> Test passed with 1 success 🎊.
    test_that("generates actionable error message", {
      path <- withr::local_tempfile(lines = "")
      expect_snapshot(safe_write_lines(letters, path), error = TRUE)
    })
    #> ── Failure: generates actionable error message ─────────────────────────────────
    #> Snapshot of code has changed:
    #> old[2:6] vs new[2:6]
    #>     safe_write_lines(letters, path)
    #>   Condition
    #>     Error in `safe_write_lines()`:
    #> -   ! '/tmp/RtmpazfsKv/file928010855f6' already exists.
    #> +   ! '/tmp/RtmpazfsKv/file92803dd3f5ec' already exists.
    #>     i Set `overwrite = TRUE` to overwrite
    #> * Run `testthat::snapshot_accept("snapshotting.Rmd")` to accept the change.
    #> * Run `testthat::snapshot_review("snapshotting.Rmd")` to review the change.
    #> Error:
    #> ! Test failed with 1 failure and 0 successes.

    One way to fix this problem is to use the transform argument to replace the temporary path with a fixed value:

    test_that("generates actionable error message", {
      path <- withr::local_tempfile(lines = "")
      expect_snapshot(
        safe_write_lines(letters, path), 
        error = TRUE,
        transform = \(lines) gsub(path, "<path>", lines, fixed = TRUE)
      )
    })
    #> ── Warning: generates actionable error message ─────────────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   safe_write_lines(letters, path)
    #> Condition
    #>   Error in `safe_write_lines()`:
    #>   ! '<path>' already exists.
    #>   i Set `overwrite = TRUE` to overwrite
    #> Test passed with 1 success 🎊.

    Now even though the path varies, the snapshot does not.

    local_reproducible_output()

    By default, testthat sets a number of options that simplify and standardize output:

    • The console width is set to 80.
    • {cli} ANSI coloring and hyperlinks are suppressed.
    • Unicode characters are suppressed.

    These are sound defaults that we have found useful to minimize spurious differences between tests run in different environments. However, there are times when you want to deliberately test different widths, ANSI escapes, or Unicode characters, so you can override the defaults with local_reproducible_output().

    Snapshotting graphics

    If you need to test graphical output, use {vdiffr}. vdiffr is used to test ggplot2 and incorporates everything we know about high-quality graphics tests that minimize false positives. Graphics testing is still often fragile, but using vdiffr means you will avoid all the problems we know how to avoid.

    Snapshotting values

    expect_snapshot() is the most used snapshot function because it records everything: the code you run, printed output, messages, warnings, and errors. If you care about the return value rather than any side effects, you might want to use expect_snapshot_value() instead. It offers a number of serialization approaches that provide a tradeoff between accuracy and human readability.

    test_that("can snapshot a simple list", {
      x <- list(a = list(1, 5, 10), b = list("elephant", "banana"))
      expect_snapshot_value(x)
    })
    #> ── Warning: can snapshot a simple list ─────────────────────────────────────────
    #> Adding new snapshot:
    #> {
    #>   "a": [
    #>     1,
    #>     5,
    #>     10
    #>   ],
    #>   "b": [
    #>     "elephant",
    #>     "banana"
    #>   ]
    #> }
    #> Test passed with 1 success 🌈.

    Whole file snapshotting

    expect_snapshot(), expect_snapshot_output(), expect_snapshot_error(), and expect_snapshot_value() use one snapshot file per test file. But that doesn’t work for all file types—for example, what happens if you want to snapshot an image? expect_snapshot_file() provides an alternative workflow that generates one snapshot per expectation, rather than one file per test. Assuming you’re in test-burger.R, then the snapshot created by expect_snapshot_file(code_that_returns_path_to_file(), "toppings.png") would be saved in tests/testthat/_snaps/burger/toppings.png. If a future change in the code creates a different file, it will be saved in tests/testthat/_snaps/burger/toppings.new.png.

    Unlike expect_snapshot() and friends, expect_snapshot_file() can’t provide an automatic diff when the test fails. Instead, you’ll need to call snapshot_review(). This launches a Shiny app that allows you to visually review each change and approve it if it’s deliberate:

    Screenshot of the Shiny app for reviewing snapshot changes to images. It shows the changes to a png file of a plot created in a snapshot test. There is a button to accept the changed snapshot, or to skip it.

    Screenshot of the Shiny app for reviewing snapshot changes to text files. It shows the changes to a .R file created in a snapshot test, where a line has been removed. There is a button to accept the changed snapshot, or to skip it.

    The display varies based on the file type (currently text files, common image files, and csv files are supported).

    Sometimes the failure occurs in a non-interactive environment where you can’t run snapshot_review(), e.g., in R CMD check. In this case, the easiest fix is to retrieve the .new file, copy it into the appropriate directory, and then run snapshot_review() locally. If this happens on GitHub, testthat provides some tools to help you in the form of gh_download_artifact().

    In most cases, we don’t expect you to use expect_snapshot_file() directly. Instead, you’ll use it via a wrapper that does its best to gracefully skip tests when differences in platform or package versions make it unlikely to generate perfectly reproducible output. That wrapper should also typically call announce_snapshot_file() to avoid snapshots being incorrectly cleaned up—see the documentation for more details.

    Previous work

    This is not the first time that testthat has attempted to provide snapshot testing (although it’s the first time I knew what other languages called them). This section describes some of the previous attempts and why we believe the new approach is better.

    • verify_output() has three main drawbacks:

      • You have to supply a path where the output will be saved. This seems like a small issue, but thinking of a good name, and managing the difference between interactive and test-time paths introduces a surprising amount of friction.

      • It always overwrites the previous result, automatically assuming that the changes are correct. That means you have to use it with git, and it’s easy to accidentally accept unwanted changes.

      • It’s relatively coarse grained, which means tests that use it tend to keep growing and growing.

    • expect_known_output() is a finer-grained version of verify_output() that captures output from a single function. The requirement to produce a path for each individual expectation makes it even more painful to use.

    • expect_known_value() and expect_known_hash() have all the disadvantages of expect_known_output(), but also produce binary output, meaning that you can’t easily review test differences in pull requests.

    testthat/inst/doc/third-edition.R0000644000176200001440000000354415130237654016536 0ustar liggesusers## ----include = FALSE---------------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ## ----message = FALSE---------------------------------------------------------- library(testthat) local_edition(3) ## ----------------------------------------------------------------------------- test_that("I can use the 3rd edition", { local_edition(3) expect_true(TRUE) }) ## ----------------------------------------------------------------------------- test_that("I want to use the 2nd edition", { local_edition(2) expect_true(TRUE) }) ## ----------------------------------------------------------------------------- f <- function() { warning("First warning") warning("Second warning") warning("Third warning") } local_edition(2) expect_warning(f(), "First") ## ----------------------------------------------------------------------------- local_edition(3) expect_warning(f(), "First") ## ----------------------------------------------------------------------------- f() |> expect_warning("First") |> expect_warning("Second") |> expect_warning("Third") f() |> expect_warning("First") |> suppressWarnings() ## ----------------------------------------------------------------------------- test_that("f() produces expected outputs/messages/warnings", { expect_snapshot(f()) }) ## ----error = TRUE------------------------------------------------------------- try({ f1 <- factor(letters[1:3]) f2 <- ordered(letters[1:3], levels = letters[1:4]) local_edition(2) expect_equal(f1, f2) local_edition(3) expect_equal(f1, f2) }) ## ----error = TRUE------------------------------------------------------------- try({ dt1 <- dt2 <- ISOdatetime(2020, 1, 2, 3, 4, 0) attr(dt1, "tzone") <- "" attr(dt2, "tzone") <- Sys.timezone() local_edition(2) expect_equal(dt1, dt2) local_edition(3) expect_equal(dt1, dt2) }) 
testthat/inst/doc/mocking.html0000644000176200001440000011424415130237645016165 0ustar liggesusers Mocking

    Mocking

    Mocking allows you to temporarily replace the implementation of a function with something that makes it easier to test. It’s useful when testing failure scenarios that are hard to generate organically (e.g., what happens if dependency X isn’t installed?), making tests more reliable, and making tests faster. It’s also a general escape hatch to resolve almost any challenging testing problem. That said, mocking comes with downsides too: it’s an advanced technique that can lead to brittle tests or tests that silently conceal problems. You should only use it when all other approaches fail.

    (If, like me, you’re confused as to why you’d want to cruelly make fun of your tests, mocking here is used in the sense of making a fake or simulated version of something, i.e., a mock-up.)

    testthat’s primary mocking tool is local_mocked_bindings() which is used to mock functions and is the focus of this vignette. But it also provides other tools for specialized cases: you can use local_mocked_s3_method() to mock an S3 method, local_mocked_s4_method() to mock an S4 method, and local_mocked_r6_class() to mock an R6 class. Once you understand the basic idea of mocking, it should be straightforward to apply these other tools where needed.

    In this vignette, we’ll start by illustrating the basics of mocking with a few examples, continue to some real-world case studies from throughout the tidyverse, then finish up with the technical details so you can understand the tradeoffs of the current implementation.

    Getting started with mocking

    Let’s begin by motivating mocking with a simple example. Imagine you’re writing a function like rlang::check_installed(). The goal of this function is to check if a package is installed, and if not, give a nice error message. It also takes an optional min_version argument that you can use to enforce a version constraint. A simple base R implementation might look something like this:

    # Check that `pkg` is installed, optionally at version >= `min_version`.
    # Errors with an informative message otherwise; returns NULL invisibly.
    check_installed <- function(pkg, min_version = NULL) {
      # requireNamespace() returns FALSE (rather than erroring) when the
      # package is missing, so we can supply our own error message.
      if (!requireNamespace(pkg, quietly = TRUE)) {
        stop(sprintf("{%s} is not installed.", pkg))
      }
      # Only enforce a version constraint when one was supplied.
      if (!is.null(min_version)) {
        pkg_version <- packageVersion(pkg)
        if (pkg_version < min_version) {
          stop(sprintf(
            "{%s} version %s is installed, but %s is required.", 
            pkg, 
            pkg_version, 
            min_version
          ))
        }
      }
    
      invisible()
    }

    Now that we’ve written this function, we want to test it. There are many ways we might tackle this, but it’s reasonable to start by testing the case where we don’t specify a minimum version. To do this, we need to come up with a package we know is installed and a package we know isn’t installed:

    test_that("check_installed() checks package is installed", {
      expect_no_error(check_installed("testthat"))
      expect_snapshot(check_installed("doesntexist"), error = TRUE)
    })
    #> ── Warning: check_installed() checks package is installed ──────────────────────
    #> Adding new snapshot:
    #> Code
    #>   check_installed("doesntexist")
    #> Condition
    #>   Error in `check_installed()`:
    #>   ! {doesntexist} is not installed.
    #> Test passed with 2 successes 🎉.

    This is probably fine, as we certainly know that testthat must be installed, but it feels a little fragile as it depends on external state that we don’t control. While it’s pretty unlikely, if someone does create a doesntexist package, this test will no longer work. As a general principle, the less your tests rely on state outside of your control, the more robust and reliable they’ll be.

    Next we want to check the case where we specify a minimum version, and again we need to make up some inputs:

    test_that("check_installed() checks minimum version", {
      expect_no_error(check_installed("testthat", "1.0.0"))
      expect_snapshot(check_installed("testthat", "99.99.999"), error = TRUE)
    })
    #> ── Warning: check_installed() checks minimum version ───────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   check_installed("testthat", "99.99.999")
    #> Condition
    #>   Error in `check_installed()`:
    #>   ! {testthat} version 3.3.2 is installed, but 99.99.999 is required.
    #> Test passed with 2 successes 🥳.

    Again, this is probably safe (since I’m unlikely to release 90+ new versions of testthat), but if you look at the snapshot message carefully, you’ll notice that it includes the current version of testthat. That means every time a new version of testthat is released, we’ll have to update the snapshot. We could use the transform argument to fix this:

    test_that("check_installed() checks minimum version", {
      expect_no_error(check_installed("testthat", "1.0.0"))
      expect_snapshot(
        check_installed("testthat", "99.99.999"), 
        error = TRUE, 
        transform = function(lines) gsub(packageVersion("testthat"), "<version>", lines)
      )
    })
    #> ── Warning: check_installed() checks minimum version ───────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   check_installed("testthat", "99.99.999")
    #> Condition
    #>   Error in `check_installed()`:
    #>   ! {testthat} version <version> is installed, but 99.99.999 is required.
    #> Test passed with 2 successes 🎉.

    But it’s starting to feel like we’re accumulating more and more hacks. So let’s take a fresh look and see how mocking might help us. The basic idea of mocking is to temporarily replace the implementation of functions being used by the function we’re testing. Here we’re testing check_installed() and want to mock requireNamespace() and packageVersion() so we can control their versions. There’s a small wrinkle here in that requireNamespace and packageVersion are base functions, not our functions, so we need to make bindings in our package namespace so we can mock them (we’ll come back to why later).

    requireNamespace <- NULL
    packageVersion <- NULL

    For the first test, we mock requireNamespace() twice: first to always return TRUE (pretending every package is installed), and then to always return FALSE (pretending that no packages are installed). Now the test is completely self-contained and doesn’t depend on what packages happen to be installed.

    test_that("check_installed() checks package is installed", {
      local_mocked_bindings(requireNamespace = function(...) TRUE)
      expect_no_error(check_installed("package-name"))
    
      local_mocked_bindings(requireNamespace = function(...) FALSE)
      expect_snapshot(check_installed("package-name"), error = TRUE)
    })
    #> ── Warning: check_installed() checks package is installed ──────────────────────
    #> Adding new snapshot:
    #> Code
    #>   check_installed("package-name")
    #> Condition
    #>   Error in `check_installed()`:
    #>   ! {package-name} is not installed.
    #> Test passed with 2 successes 🎊.

    For the second test, we mock requireNamespace() to return TRUE, and then packageVersion() to always return version 2.0.0. This again ensures our test is independent of system state.

    test_that("check_installed() checks minimum version", {
      local_mocked_bindings(
        requireNamespace = function(...) TRUE,
        packageVersion = function(...) numeric_version("2.0.0")
      )
      
      expect_no_error(check_installed("package-name", "1.0.0"))
      expect_snapshot(check_installed("package-name", "3.4.5"), error = TRUE)
    })
    #> ── Warning: check_installed() checks minimum version ───────────────────────────
    #> Adding new snapshot:
    #> Code
    #>   check_installed("package-name", "3.4.5")
    #> Condition
    #>   Error in `check_installed()`:
    #>   ! {package-name} version 2.0.0 is installed, but 3.4.5 is required.
    #> Test passed with 2 successes 🎉.

    Case studies

    To give you more experience with mocking, this section looks at a few places where we use mocking in the tidyverse:

    • Testing testthat::skip_on_os() regardless of what operating system is running the test.
    • Speeding up usethis::use_release_issue().
    • Testing the passage of time in httr2::req_throttle().

    These situations are all a little complex, as this is the nature of mocking: if you can use a simpler technique, you should. Mocking is only needed for otherwise intractable problems.

    Pretending we’re on a different platform

    testthat::skip_on_os() allows you to skip tests on specific operating systems, using the internal system_os() function which is a thin wrapper around Sys.info()[["sysname"]]. To test that this skip works correctly, we have to use mocking because there’s no other way to pretend we’re running on a different operating system. This yields the following test, where we use mocking to pretend that we’re always on Windows:

    test_that("can skip on multiple oses", {
      local_mocked_bindings(system_os = function() "windows")
    
      expect_skip(skip_on_os("windows"))
      expect_skip(skip_on_os(c("windows", "linux")))
      expect_no_skip(skip_on_os("linux"))
    })

    (The logic of skip_on_os() is simple enough that I feel confident we only need to simulate one platform.)

    Speeding up tests

    usethis::use_release_issue() creates a GitHub issue with a bulleted list of actions to follow when releasing a package. But some of the bullets depend on complex conditions that can take a while to compute. So the tests for this function use mocks like this:

    local_mocked_bindings(
      get_revdeps = function() character(),
      gh_milestone_number = function(...) NA
    )

    Here we pretend that there are no reverse dependencies (revdeps) for the package, which is both slow to compute and will vary over time if we use a real package. We also pretend that there are no related GitHub milestones, which otherwise requires a GitHub API call, which is again slow and might vary over time. Together, these mocks keep the tests fast and self-contained, free from any state outside of our direct control.

    Managing time

    httr2::req_throttle() prevents multiple requests from being made too quickly, using a technique called a leaky token bucket. This technique is inextricably tied to real time because you want to allow more requests as time elapses. So how do you test this? I started by using Sys.sleep(), but this made my tests both slow (because I’d sleep for a second or two) and unreliable (because sometimes more time elapsed than I expected). Eventually I figured out that I could “manually control” time by using a mocked function that returns the value of a variable I control. This allows me to manually advance time and carefully test the implications.

    You can see the basic idea with a simpler example. Let’s first begin with a function that returns the “unix time”, the number of seconds elapsed since midnight on Jan 1, 1970. This is easy to compute, but will make some computations simpler later as well as providing a convenient function to mock.

    unix_time <- function() unclass(Sys.time())
    unix_time()
    #> [1] 1767980965

    Now I’m going to create a function factory that makes it easy to compute how much time has elapsed since some fixed starting point:

    # Function factory: records the current time at creation and returns a
    # closure that reports the number of seconds elapsed since that moment.
    elapsed <- function() {
      start <- unix_time()
      function() {
        unix_time() - start
      }
    }
    
    timer <- elapsed()
    Sys.sleep(0.5)
    timer()
    #> [1] 0.507338

    Imagine trying to test this function without mocking! You’d probably think it’s not worth it. In fact, that’s what I thought originally, but I soon learned my lesson when I introduced a bug because I’d forgotten the complexities of computing the difference between two POSIXct values.

    With mocking, however, I can “manipulate time” by mocking unix_time() so that it returns the value of a variable I control. Now I can write a reliable test:

    test_that("elapsed() measures elapsed time", {
      time <- 1
      local_mocked_bindings(unix_time = function() time)
    
      timer <- elapsed()
      expect_equal(timer(), 0)
    
      time <- 2
      expect_equal(timer(), 1)
    })
    #> Test passed with 2 successes 🌈.

    How does mocking work?

    To finish up, it’s worth discussing how mocking works. The fundamental challenge of mocking is that you want it to be “hygienic”, i.e. it should only affect the operation of your package code, not all running code. You can see why this might be problematic if you imagine mocking a function that testthat itself uses: you don’t want to accidentally break testthat while trying to test your code! To achieve this goal, local_mocked_bindings() works by modifying your package’s namespace environment.

    You can implement the basic idea using base R code like this:

    old <- getFromNamespace("my_function", "mypackage")
    assignInNamespace("my_function", new, "mypackage")
    
    # run the test...
    
    # restore the previous value
    assignInNamespace("my_function", old, "mypackage")

    This implementation leads to two limitations of local_mocked_bindings():

    1. The package namespace is locked, which means that you can’t add new bindings to it. That means if you want to mock base functions, you have to provide some binding that can be overridden. The easiest way to do this is with something like mean <- NULL. This creates a binding that local_mocked_bindings() can modify, but because of R’s lexical scoping rules doesn’t affect ordinary calls.

    2. :: doesn’t use the package namespace, so if you want to mock an explicitly namespaced function, you either have to import fun into your NAMESPACE (e.g., with @importFrom pkg fun) or create your own wrapper function that you can mock. Typically, one of these options will feel fairly natural.

    Overall, these limitations feel correct to me: local_mocked_bindings() makes it easy to temporarily change the implementation of functions that you have written, while offering workarounds to override the implementations of functions that others have written in the scope of your package.

    testthat/inst/doc/mocking.R0000644000176200001440000001027315130237645015417 0ustar liggesusers## ----------------------------------------------------------------------------- library(testthat) knitr::opts_chunk$set(collapse = TRUE, comment = "#>") # Pretend we're snapshotting snapper <- local_snapshotter(fail_on_new = FALSE) snapper$start_file("snapshotting.Rmd", "test") # Pretend we're testing testthat so we can use mocking Sys.setenv(TESTTHAT_PKG = "testthat") ## ----------------------------------------------------------------------------- check_installed <- function(pkg, min_version = NULL) { if (!requireNamespace(pkg, quietly = TRUE)) { stop(sprintf("{%s} is not installed.", pkg)) } if (!is.null(min_version)) { pkg_version <- packageVersion(pkg) if (pkg_version < min_version) { stop(sprintf( "{%s} version %s is installed, but %s is required.", pkg, pkg_version, min_version )) } } invisible() } ## ----------------------------------------------------------------------------- test_that("check_installed() checks package is installed", { expect_no_error(check_installed("testthat")) expect_snapshot(check_installed("doesntexist"), error = TRUE) }) ## ----------------------------------------------------------------------------- test_that("check_installed() checks minimum version", { expect_no_error(check_installed("testthat", "1.0.0")) expect_snapshot(check_installed("testthat", "99.99.999"), error = TRUE) }) ## ----------------------------------------------------------------------------- test_that("check_installed() checks minimum version", { expect_no_error(check_installed("testthat", "1.0.0")) expect_snapshot( check_installed("testthat", "99.99.999"), error = TRUE, transform = function(lines) gsub(packageVersion("testthat"), "", lines) ) }) ## ----------------------------------------------------------------------------- requireNamespace <- NULL packageVersion <- NULL ## ----------------------------------------------------------------------------- 
test_that("check_installed() checks package is installed", { local_mocked_bindings(requireNamespace = function(...) TRUE) expect_no_error(check_installed("package-name")) local_mocked_bindings(requireNamespace = function(...) FALSE) expect_snapshot(check_installed("package-name"), error = TRUE) }) ## ----------------------------------------------------------------------------- test_that("check_installed() checks minimum version", { local_mocked_bindings( requireNamespace = function(...) TRUE, packageVersion = function(...) numeric_version("2.0.0") ) expect_no_error(check_installed("package-name", "1.0.0")) expect_snapshot(check_installed("package-name", "3.4.5"), error = TRUE) }) ## ----------------------------------------------------------------------------- system_os <- NULL ## ----------------------------------------------------------------------------- # test_that("can skip on multiple oses", { # local_mocked_bindings(system_os = function() "windows") # # expect_skip(skip_on_os("windows")) # expect_skip(skip_on_os(c("windows", "linux"))) # expect_no_skip(skip_on_os("linux")) # }) ## ----------------------------------------------------------------------------- # local_mocked_bindings( # get_revdeps = function() character(), # gh_milestone_number = function(...) 
NA # ) ## ----------------------------------------------------------------------------- unix_time <- function() unclass(Sys.time()) unix_time() ## ----------------------------------------------------------------------------- elapsed <- function() { start <- unix_time() function() { unix_time() - start } } timer <- elapsed() Sys.sleep(0.5) timer() ## ----------------------------------------------------------------------------- test_that("elapsed() measures elapsed time", { time <- 1 local_mocked_bindings(unix_time = function() time) timer <- elapsed() expect_equal(timer(), 0) time <- 2 expect_equal(timer(), 1) }) ## ----------------------------------------------------------------------------- # old <- getFromNamespace("my_function", "mypackage") # assignInNamespace("my_function", new, "mypackage") # # # run the test... # # # restore the previous value # assignInNamespace("my_function", old, "mypackage") testthat/README.md0000644000176200001440000000505415127732671013410 0ustar liggesusers # testthat [![CRAN status](https://www.r-pkg.org/badges/version/testthat)](https://cran.r-project.org/package=testthat) [![R-CMD-check](https://github.com/r-lib/testthat/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/r-lib/testthat/actions/workflows/R-CMD-check.yaml) [![Codecov test coverage](https://codecov.io/gh/r-lib/testthat/graph/badge.svg)](https://app.codecov.io/gh/r-lib/testthat) ## Overview Testing your code can be painful and tedious, but it greatly increases the quality of your code. **testthat** tries to make testing as fun as possible, so that you get a visceral satisfaction from writing tests. Testing should be addictive, so you do it all the time. To make that happen, testthat: - Provides functions that make it easy to describe what you expect a function to do, including catching errors, warnings, and messages. 
- Easily integrates in your existing workflow, whether it’s informal testing on the command line, building test suites, or using R CMD check. - Displays test progress visually, showing a pass, fail, or error for every expectation. If you’re using the terminal or a recent version of RStudio, it’ll even colour the output. testthat draws inspiration from the xUnit family of testing packages, as well as from many of the innovative ruby testing libraries, like [rspec](https://rspec.info/), [testy](https://github.com/ahoward/testy), [bacon](https://github.com/leahneukirchen/bacon) and [cucumber](https://cucumber.io). testthat is the most popular unit testing package for R and is used by thousands of CRAN packages. If you’re not familiar with testthat, the [testing chapter](https://r-pkgs.org/testing-basics.html) in [R packages](https://r-pkgs.org) gives a good overview, along with workflow advice and concrete examples. ## Installation ``` r # Install the released version from CRAN install.packages("testthat") # Or the development version from GitHub: # install.packages("pak") pak::pak("r-lib/testthat") ``` ## Usage The easiest way to get started is with [usethis](https://github.com/r-lib/usethis). Assuming you’re in a package directory, just run `usethis::use_test("name")` to create a test file, and set up all the other infrastructure you need. If you’re using RStudio, press Cmd/Ctrl + Shift + T (or run `devtools::test()` if not) to run all the tests in a package. 
testthat/build/0000755000176200001440000000000015130237654013217 5ustar liggesuserstestthat/build/vignette.rds0000644000176200001440000000066215130237654015562 0ustar liggesusersRAO0-c (b<ş bL pl5lݲvokoⷭȡ{}ﳇrrӄ6!.ra $ )2J*-#_/2PObnQ`Dž&&v,I캡$A,\H8ݰ=,K)Y՝-y ;lś{yPEB(6j^T˧ey~DVq%[VTlU)r57T6_hM促t;g,4&Xh#F}4H,[LVu.V9U@R2 Z>5\Kg%rJY :05T-N"`I.@.X3Y-miBC|Fq kng S iIfǵtestthat/man/0000755000176200001440000000000015130037147012666 5ustar liggesuserstestthat/man/CheckReporter.Rd0000644000176200001440000000156515127561732015734 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-check.R \name{CheckReporter} \alias{CheckReporter} \title{Report results for \verb{R CMD check}} \description{ \verb{R CMD check} displays only the last 13 lines of the result, so this report is designed to ensure that you see something useful there. } \seealso{ Other reporters: \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/MultiReporter.Rd0000644000176200001440000000155615127561732016011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-multi.R \name{MultiReporter} \alias{MultiReporter} \title{Run multiple reporters at the same time} \description{ This reporter is useful to use several reporters at the same time, e.g. adding a custom reporter without removing the current one. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/expect_no_success.Rd0000644000176200001440000000131315047715224016675 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-self-test.R \name{expect_no_success} \alias{expect_no_success} \alias{expect_no_failure} \title{Test for absence of success or failure} \usage{ expect_no_success(expr) expect_no_failure(expr) } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} These functions are deprecated because \code{\link[=expect_success]{expect_success()}} and \code{\link[=expect_failure]{expect_failure()}} now test for exactly one success or no failures, and exactly one failure and no successes. } \keyword{internal} testthat/man/set_max_fails.Rd0000644000176200001440000000072415040747537016011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-progress.R \name{set_max_fails} \alias{set_max_fails} \title{Set maximum number of test failures allowed before aborting the run} \usage{ set_max_fails(n) } \arguments{ \item{n}{Maximum number of failures allowed.} } \description{ This sets the \code{TESTTHAT_MAX_FAILS} env var which will affect both the current R process and any processes launched from it. 
} \keyword{internal} testthat/man/expect_snapshot_file.Rd0000644000176200001440000001243315054412736017374 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-file.R \name{expect_snapshot_file} \alias{expect_snapshot_file} \alias{announce_snapshot_file} \alias{compare_file_binary} \alias{compare_file_text} \title{Do you expect this code to create the same file as last time?} \usage{ expect_snapshot_file( path, name = basename(path), binary = deprecated(), cran = FALSE, compare = NULL, transform = NULL, variant = NULL ) announce_snapshot_file(path, name = basename(path)) compare_file_binary(old, new) compare_file_text(old, new) } \arguments{ \item{path}{Path to file to snapshot. Optional for \code{announce_snapshot_file()} if \code{name} is supplied.} \item{name}{Snapshot name, taken from \code{path} by default.} \item{binary}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} Please use the \code{compare} argument instead.} \item{cran}{Should these expectations be verified on CRAN? By default, they are not, because snapshot tests tend to be fragile because they often rely on minor details of dependencies.} \item{compare}{A function used to compare the snapshot files. It should take two inputs, the paths to the \code{old} and \code{new} snapshot, and return either \code{TRUE} or \code{FALSE}. This defaults to \code{compare_file_text} if \code{name} has extension \code{.r}, \code{.R}, \code{.Rmd}, \code{.md}, or \code{.txt}, and otherwise uses \code{compare_file_binary}. \code{compare_file_binary()} compares byte-by-byte and \code{compare_file_text()} compares lines-by-line, ignoring the difference between Windows and Mac/Linux line endings.} \item{transform}{Optionally, a function to scrub sensitive or stochastic text from the output. 
Should take a character vector of lines as input and return a modified character vector as output.} \item{variant}{If not-\code{NULL}, results will be saved in \verb{_snaps/\{variant\}/\{test\}/\{name\}}. This allows you to create different snapshots for different scenarios, like different operating systems or different R versions. Note that there's no way to declare all possible variants up front which means that as soon as you start using variants, you are responsible for deleting snapshot variants that are no longer used. (testthat will still delete all variants if you delete the test.)} \item{old, new}{Paths to old and new snapshot files.} } \description{ Whole file snapshot testing is designed for testing objects that don't have a convenient textual representation, with initial support for images (\code{.png}, \code{.jpg}, \code{.svg}), data frames (\code{.csv}), and text files (\code{.R}, \code{.txt}, \code{.json}, ...). The first time \code{expect_snapshot_file()} is run, it will create \verb{_snaps/\{test\}/\{name\}.\{ext\}} containing reference output. Future runs will be compared to this reference: if different, the test will fail and the new results will be saved in \verb{_snaps/\{test\}/\{name\}.new.\{ext\}}. To review failures, call \code{\link[=snapshot_review]{snapshot_review()}}. We generally expect this function to be used via a wrapper that takes care of ensuring that output is as reproducible as possible, e.g. automatically skipping tests where it's known that images can't be reproduced exactly. } \section{Announcing snapshots}{ testthat automatically detects dangling snapshots that have been written to the \verb{_snaps} directory but which no longer have corresponding R code to generate them. These dangling files are automatically deleted so they don't clutter the snapshot directory. This can cause problems if your test is conditionally executed, either because of an \code{if} statement or a \code{\link[=skip]{skip()}}. 
To avoid files being deleted in this case, you can call \code{announce_snapshot_file()} before the conditional code. \if{html}{\out{
    }}\preformatted{test_that("can save a file", \{ if (!can_save()) \{ announce_snapshot_file(name = "data.txt") skip("Can't save file") \} path <- withr::local_tempfile() expect_snapshot_file(save_file(path, mydata()), "data.txt") \}) }\if{html}{\out{
    }} } \examples{ # To use expect_snapshot_file() you'll typically need to start by writing # a helper function that creates a file from your code, returning a path save_png <- function(code, width = 400, height = 400) { path <- tempfile(fileext = ".png") png(path, width = width, height = height) on.exit(dev.off()) code path } path <- save_png(plot(1:5)) path \dontrun{ expect_snapshot_file(save_png(hist(mtcars$mpg)), "plot.png") } # You'd then also provide a helper that skips tests where you can't # be sure of producing exactly the same output. expect_snapshot_plot <- function(name, code) { # Announce the file before touching skips or running `code`. This way, # if the skips are active, testthat will not auto-delete the corresponding # snapshot file. name <- paste0(name, ".png") announce_snapshot_file(name = name) # Other packages might affect results skip_if_not_installed("ggplot2", "2.0.0") # Or maybe the output is different on some operating systems skip_on_os("windows") # You'll need to carefully think about and experiment with these skips path <- save_png(code) expect_snapshot_file(path, name) } } testthat/man/snapshot_download_gh.Rd0000644000176200001440000000201015127554030017353 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-github.R \name{snapshot_download_gh} \alias{snapshot_download_gh} \title{Download snapshots from GitHub} \usage{ snapshot_download_gh(repository, run_id, dest_dir = ".") } \arguments{ \item{repository}{Repository owner/name, e.g. \code{"r-lib/testthat"}.} \item{run_id}{Run ID, e.g. \code{"47905180716"}. You can find this in the action url.} \item{dest_dir}{Package root directory. Defaults to the current directory.} } \description{ If your snapshots fail on GitHub, it can be a pain to figure out exactly why, or to incorporate them into your local package. This function makes it easy, only requiring you to interactively select which job you want to take the artifacts from. 
Note that you should not generally need to use this function manually; instead copy and paste from the hint emitted on GitHub. This hint is only emitted when running in a job named "R-CMD-check", since that's where the testthat artifact is typically uploaded. } testthat/man/expect_less_than.Rd0000644000176200001440000000104614164710002016500 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-comparison.R \name{expect_less_than} \alias{expect_less_than} \alias{expect_more_than} \title{Deprecated numeric comparison functions} \usage{ expect_less_than(...) expect_more_than(...) } \arguments{ \item{...}{All arguments passed on to \code{expect_lt()}/\code{expect_gt()}.} } \description{ These functions have been deprecated in favour of the more concise \code{\link[=expect_gt]{expect_gt()}} and \code{\link[=expect_lt]{expect_lt()}}. } \keyword{internal} testthat/man/expect_length.Rd0000644000176200001440000000356115054053615016016 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-shape.R \name{expect_length} \alias{expect_length} \alias{expect_shape} \title{Do you expect an object with this length or shape?} \usage{ expect_length(object, n) expect_shape(object, ..., nrow, ncol, dim) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{n}{Expected length.} \item{...}{Not used; used to force naming of other arguments.} \item{nrow, ncol}{Expected \code{\link[=nrow]{nrow()}}/\code{\link[=ncol]{ncol()}} of \code{object}.} \item{dim}{Expected \code{\link[=dim]{dim()}} of \code{object}.} } \description{ \code{expect_length()} inspects the \code{\link[=length]{length()}} of an object; \code{expect_shape()} inspects the "shape" (i.e. 
\code{\link[=nrow]{nrow()}}, \code{\link[=ncol]{ncol()}}, or \code{\link[=dim]{dim()}}) of higher-dimensional objects like data.frames, matrices, and arrays. } \examples{ expect_length(1, 1) expect_length(1:10, 10) show_failure(expect_length(1:10, 1)) x <- matrix(1:9, nrow = 3) expect_shape(x, nrow = 3) show_failure(expect_shape(x, nrow = 4)) expect_shape(x, ncol = 3) show_failure(expect_shape(x, ncol = 4)) expect_shape(x, dim = c(3, 3)) show_failure(expect_shape(x, dim = c(3, 4, 5))) } \seealso{ \code{\link[=expect_vector]{expect_vector()}} to make assertions about the "size" of a vector. Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/snapshot_accept.Rd0000644000176200001440000000224215054053615016336 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-manage.R \name{snapshot_accept} \alias{snapshot_accept} \alias{snapshot_reject} \alias{snapshot_review} \title{Accept or reject modified snapshots} \usage{ snapshot_accept(files = NULL, path = "tests/testthat") snapshot_reject(files = NULL, path = "tests/testthat") snapshot_review(files = NULL, path = "tests/testthat", ...) } \arguments{ \item{files}{Optionally, filter effects to snapshots from specified files. This can be a snapshot name (e.g. \code{foo} or \code{foo.md}), a snapshot file name (e.g. \code{testfile/foo.txt}), or a snapshot file directory (e.g. \verb{testfile/}).} \item{path}{Path to tests.} \item{...}{Additional arguments passed on to \code{\link[shiny:runApp]{shiny::runApp()}}.} } \description{ \itemize{ \item \code{snapshot_accept()} accepts all modified snapshots. 
\item \code{snapshot_reject()} rejects all modified snapshots by deleting the \code{.new} variants. \item \code{snapshot_review()} opens a Shiny app that shows a visual diff of each modified snapshot. This is particularly useful for whole file snapshots created by \code{expect_snapshot_file()}. } } testthat/man/local_test_context.Rd0000644000176200001440000001104415047715224017060 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/local.R \name{local_test_context} \alias{local_test_context} \alias{local_reproducible_output} \title{Temporarily set options for maximum reproducibility} \usage{ local_test_context(.env = parent.frame()) local_reproducible_output( width = 80, crayon = FALSE, unicode = FALSE, rstudio = FALSE, hyperlinks = FALSE, lang = "C", .env = parent.frame() ) } \arguments{ \item{.env}{Environment to use for scoping; expert use only.} \item{width}{Value of the \code{"width"} option.} \item{crayon}{Determines whether or not crayon (now cli) colour should be applied.} \item{unicode}{Value of the \code{"cli.unicode"} option. The test is skipped if \code{l10n_info()$`UTF-8`} is \code{FALSE}.} \item{rstudio}{Should we pretend that we're inside of RStudio?} \item{hyperlinks}{Should we use ANSI hyperlinks.} \item{lang}{Optionally, supply a BCP47 language code to set the language used for translating error messages. This is a lower case two letter \href{https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes}{ISO 639 country code}, optionally followed by "_" or "-" and an upper case two letter \href{https://en.wikipedia.org/wiki/ISO_3166-2}{ISO 3166 region code}.} } \description{ \code{local_test_context()} is run automatically by \code{test_that()} but you may want to run it yourself if you want to replicate test results interactively. 
If run inside a function, the effects are automatically reversed when the function exits; if running in the global environment, use \code{\link[withr:defer]{withr::deferred_run()}} to undo. \code{local_reproducible_output()} is run automatically by \code{test_that()} in the 3rd edition. You might want to call it to override the the default settings inside a test, if you want to test Unicode, coloured output, or a non-standard width. } \details{ \code{local_test_context()} sets \code{TESTTHAT = "true"}, which ensures that \code{\link[=is_testing]{is_testing()}} returns \code{TRUE} and allows code to tell if it is run by testthat. In the third edition, \code{local_test_context()} also calls \code{local_reproducible_output()} which temporary sets the following options: \itemize{ \item \code{cli.dynamic = FALSE} so that tests assume that they are not run in a dynamic console (i.e. one where you can move the cursor around). \item \code{cli.unicode} (default: \code{FALSE}) so that the cli package never generates unicode output (normally cli uses unicode on Linux/Mac but not Windows). Windows can't easily save unicode output to disk, so it must be set to false for consistency. \item \code{cli.condition_width = Inf} so that new lines introduced while width-wrapping condition messages don't interfere with message matching. \item \code{crayon.enabled} (default: \code{FALSE}) suppresses ANSI colours generated by the cli and crayon packages (normally colours are used if cli detects that you're in a terminal that supports colour). \item \code{cli.num_colors} (default: \code{1L}) Same as the crayon option. \item \code{lifecycle_verbosity = "warning"} so that every lifecycle problem always generates a warning (otherwise deprecated functions don't generate a warning every time). \item \code{max.print = 99999} so the same number of values are printed. \item \code{OutDec = "."} so numbers always uses \code{.} as the decimal point (European users sometimes set \code{OutDec = ","}). 
\item \code{rlang_interactive = FALSE} so that \code{\link[rlang:is_interactive]{rlang::is_interactive()}} returns \code{FALSE}, and code that uses it pretends you're in a non-interactive environment. \item \code{useFancyQuotes = FALSE} so base R functions always use regular (straight) quotes (otherwise the default is locale dependent, see \code{\link[=sQuote]{sQuote()}} for details). \item \code{width} (default: 80) to control the width of printed output (usually this varies with the size of your console). } And modifies the following env vars: \itemize{ \item Unsets \code{RSTUDIO}, which ensures that RStudio is never detected as running. \item Sets \code{LANGUAGE = "en"}, which ensures that no message translation occurs. } Finally, it sets the collation locale to "C", which ensures that character sorting the same regardless of system locale. } \examples{ local({ local_test_context() cat(cli::col_blue("Text will not be colored")) cat(cli::symbol$ellipsis) cat("\n") }) test_that("test ellipsis", { local_reproducible_output(unicode = FALSE) expect_equal(cli::symbol$ellipsis, "...") local_reproducible_output(unicode = TRUE) expect_equal(cli::symbol$ellipsis, "\u2026") }) } testthat/man/make_expectation.Rd0000644000176200001440000000121313171137773016503 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/make-expectation.R \name{make_expectation} \alias{make_expectation} \title{Make an equality test.} \usage{ make_expectation(x, expectation = "equals") } \arguments{ \item{x}{a vector of values} \item{expectation}{the type of equality you want to test for (\code{"equals"}, \code{"is_equivalent_to"}, \code{"is_identical_to"})} } \description{ This a convenience function to make a expectation that checks that input stays the same. 
} \examples{ x <- 1:10 make_expectation(x) make_expectation(mtcars$mpg) df <- data.frame(x = 2) make_expectation(df) } \keyword{internal} testthat/man/set_state_inspector.Rd0000644000176200001440000000521415072252215017240 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-state.R \name{set_state_inspector} \alias{set_state_inspector} \title{Check for global state changes} \usage{ set_state_inspector(callback, tolerance = testthat_tolerance()) } \arguments{ \item{callback}{Either a zero-argument function that returns an object capturing global state that you're interested in, or \code{NULL}.} \item{tolerance}{If non-\code{NULL}, used as threshold for ignoring small floating point difference when comparing numeric vectors. Using any non-\code{NULL} value will cause integer and double vectors to be compared based on their values, not their types, and will ignore the difference between \code{NaN} and \code{NA_real_}. It uses the same algorithm as \code{\link[=all.equal]{all.equal()}}, i.e., first we generate \code{x_diff} and \code{y_diff} by subsetting \code{x} and \code{y} to look only locations with differences. Then we check that \code{mean(abs(x_diff - y_diff)) / mean(abs(y_diff))} (or just \code{mean(abs(x_diff - y_diff))} if \code{y_diff} is small) is less than \code{tolerance}.} } \description{ One of the most pernicious challenges to debug is when a test runs fine in your test suite, but fails when you run it interactively (or similarly, it fails randomly when running your tests in parallel). One of the most common causes of this problem is accidentally changing global state in a previous test (e.g. changing an option, an environment variable, or the working directory). This is hard to debug, because it's very hard to figure out which test made the change. Luckily testthat provides a tool to figure out if tests are changing global state. 
You can register a state inspector with \code{set_state_inspector()} and testthat will run it before and after each test, store the results, then report if there are any differences. For example, if you wanted to see if any of your tests were changing options or environment variables, you could put this code in \code{tests/testthat/helper-state.R}: \if{html}{\out{
    }}\preformatted{set_state_inspector(function() \{ list( options = options(), envvars = Sys.getenv() ) \}) }\if{html}{\out{
    }} (You might discover other packages outside your control are changing the global state, in which case you might want to modify this function to ignore those values.) Other problems that can be troublesome to resolve are CRAN check notes that report things like connections being left open. You can easily debug that problem with: \if{html}{\out{
    }}\preformatted{set_state_inspector(function() \{ getAllConnections() \}) }\if{html}{\out{
    }} } testthat/man/test_that.Rd0000644000176200001440000000316115040747537015170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-that.R \name{test_that} \alias{test_that} \title{Run a test} \usage{ test_that(desc, code) } \arguments{ \item{desc}{Test name. Names should be brief, but evocative. It's common to write the description so that it reads like a natural sentence, e.g. \code{test_that("multiplication works", { ... })}.} \item{code}{Test code containing expectations. Braces (\code{{}}) should always be used in order to get accurate location data for test failures.} } \value{ When run interactively, returns \code{invisible(TRUE)} if all tests pass, otherwise throws an error. } \description{ A test encapsulates a series of expectations about a small, self-contained unit of functionality. Each test contains one or more expectations, such as \code{\link[=expect_equal]{expect_equal()}} or \code{\link[=expect_error]{expect_error()}}, and lives in a \verb{test/testhat/test*} file, often together with other tests that relate to the same function or set of functions. Each test has its own execution environment, so an object created in a test also dies with the test. Note that this cleanup does not happen automatically for other aspects of global state, such as session options or filesystem changes. Avoid changing global state, when possible, and reverse any changes that you do make. 
} \examples{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1 / sqrt(2)) expect_equal(cos(pi / 4), 1 / sqrt(2)) expect_equal(tan(pi / 4), 1) }) \dontrun{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1) }) } } testthat/man/expect_invisible.Rd0000644000176200001440000000174415047715224016525 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-invisible.R \name{expect_invisible} \alias{expect_invisible} \alias{expect_visible} \title{Do you expect the result to be (in)visible?} \usage{ expect_invisible(call, label = NULL) expect_visible(call, label = NULL) } \arguments{ \item{call}{A function call.} \item{label}{Used to customise failure messages. For expert use only.} } \value{ The evaluated \code{call}, invisibly. } \description{ Use this to test whether a function returns a visible or invisible output. Typically you'll use this to check that functions called primarily for their side-effects return their data argument invisibly. } \examples{ expect_invisible(x <- 10) expect_visible(x) # Typically you'll assign the result of the expectation so you can # also check that the value is as you expect. 
greet <- function(name) { message("Hi ", name) invisible(name) } out <- expect_invisible(greet("Hadley")) expect_equal(out, "Hadley") } testthat/man/comparison-expectations.Rd0000644000176200001440000000255015104404205020027 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-comparison.R \name{comparison-expectations} \alias{comparison-expectations} \alias{expect_lt} \alias{expect_lte} \alias{expect_gt} \alias{expect_gte} \title{Do you expect a value bigger or smaller than this?} \usage{ expect_lt(object, expected, label = NULL, expected.label = NULL) expect_lte(object, expected, label = NULL, expected.label = NULL) expect_gt(object, expected, label = NULL, expected.label = NULL) expect_gte(object, expected, label = NULL, expected.label = NULL) } \arguments{ \item{object, expected}{A value to compare and its expected bound.} \item{label, expected.label}{Used to customise failure messages. For expert use only.} } \description{ These functions compare values of comparable data types, such as numbers, dates, and times. } \examples{ a <- 9 expect_lt(a, 10) \dontrun{ expect_lt(11, 10) } a <- 11 expect_gt(a, 10) \dontrun{ expect_gt(9, 10) } } \seealso{ Other expectations: \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/RStudioReporter.Rd0000644000176200001440000000150115127561732016276 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-rstudio.R \name{RStudioReporter} \alias{RStudioReporter} \title{Report results to RStudio} \description{ This reporter is designed for output to RStudio. 
It produces results in any easily parsed form. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/expect_reference.Rd0000644000176200001440000000337715054053615016500 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-reference.R \name{expect_reference} \alias{expect_reference} \title{Do you expect a reference to this object?} \usage{ expect_reference( object, expected, info = NULL, label = NULL, expected.label = NULL ) } \arguments{ \item{object, expected}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label, expected.label}{Used to customise failure messages. For expert use only.} } \description{ \code{expect_reference()} compares the underlying memory addresses of two symbols. It is for expert use only. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_reference()} is deprecated in the third edition. 
If you know what you're doing, and you really need this behaviour, just use \code{is_reference()} directly: \code{expect_true(rlang::is_reference(x, y))}. } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} \keyword{internal} testthat/man/expect_that.Rd0000644000176200001440000000317515047715224015501 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{expect_that} \alias{expect_that} \title{Expect that a condition holds.} \usage{ expect_that(object, condition, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{condition, }{a function that returns whether or not the condition is met, and if not, an error message to display.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \value{ the (internal) expectation result as an invisible list } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} An old style of testing that's no longer encouraged. 
} \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} This style of testing is formally deprecated as of the 3rd edition. Use a more specific \code{expect_} function instead. } \examples{ expect_that(5 * 2, equals(10)) expect_that(sqrt(2) ^ 2, equals(2)) \dontrun{ expect_that(sqrt(2) ^ 2, is_identical_to(2)) } } \seealso{ \code{\link[=fail]{fail()}} for an expectation that always fails. } \keyword{internal} testthat/man/source_file.Rd0000644000176200001440000000276315054053615015467 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/source.R \name{source_file} \alias{source_file} \alias{source_dir} \alias{source_test_helpers} \alias{source_test_setup} \alias{source_test_teardown} \title{Source a file, directory of files, or various important subsets} \usage{ source_file( path, env = test_env(), chdir = TRUE, desc = NULL, wrap = TRUE, shuffle = FALSE, error_call = caller_env() ) source_dir( path, pattern = "\\\\.[rR]$", env = test_env(), chdir = TRUE, wrap = TRUE, shuffle = FALSE ) source_test_helpers(path = "tests/testthat", env = test_env()) source_test_setup(path = "tests/testthat", env = test_env()) source_test_teardown(path = "tests/testthat", env = test_env()) } \arguments{ \item{path}{Path to files.} \item{env}{Environment in which to evaluate code.} \item{chdir}{Change working directory to \code{dirname(path)}?} \item{desc}{A character vector used to filter tests. This is used to (recursively) filter the content of the file, so that only the non-test code up to and including the matching test is run.} \item{wrap}{Automatically wrap all code within \code{\link[=test_that]{test_that()}}? 
This ensures that all expectations are reported, even if outside a test block.} \item{shuffle}{If \code{TRUE}, randomly reorder the top-level expressions in the file.} \item{pattern}{Regular expression used to filter files.} } \description{ These are used by \code{\link[=test_dir]{test_dir()}} and friends } \keyword{internal} testthat/man/test_dir.Rd0000644000176200001440000000546515104404205014776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{test_dir} \alias{test_dir} \title{Run all tests in a directory} \usage{ test_dir( path, filter = NULL, reporter = NULL, env = NULL, ..., load_helpers = TRUE, stop_on_failure = TRUE, stop_on_warning = FALSE, package = NULL, load_package = c("none", "installed", "source"), shuffle = FALSE ) } \arguments{ \item{path}{Path to directory containing tests.} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching is performed on the file name after it's stripped of \code{"test-"} and \code{".R"}.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{env}{Environment in which to execute the tests. Expert use only.} \item{...}{Additional arguments passed to \code{\link[=grepl]{grepl()}} to control filtering.} \item{load_helpers}{Source helper files before running the tests?} \item{stop_on_failure}{If \code{TRUE}, throw an error if any tests fail.} \item{stop_on_warning}{If \code{TRUE}, throw an error if any tests generate warnings.} \item{package}{If these tests belong to a package, the name of the package.} \item{load_package}{Strategy to use for load package code: \itemize{ \item "none", the default, doesn't load the package. \item "installed", uses \code{\link[=library]{library()}} to load an installed package. 
\item "source", uses \code{\link[pkgload:load_all]{pkgload::load_all()}} to a source package. To configure the arguments passed to \code{load_all()}, add this field in your DESCRIPTION file: \if{html}{\out{
    }}\preformatted{Config/testthat/load-all: list(export_all = FALSE, helpers = FALSE) }\if{html}{\out{
    }} }} \item{shuffle}{If \code{TRUE}, randomly reorder the top-level expressions in the file.} } \value{ A list (invisibly) containing data about the test results. } \description{ This function is the low-level workhorse that powers \code{\link[=test_local]{test_local()}} and \code{\link[=test_package]{test_package()}}. Generally, you should not call this function directly. In particular, you are responsible for ensuring that the functions to test are available in the test \code{env} (e.g. via \code{load_package}). See \code{vignette("special-files")} to learn more about the conventions for test, helper, and setup files that testthat uses, and what you might use each for. } \section{Environments}{ Each test is run in a clean environment to keep tests as isolated as possible. For package tests, that environment inherits from the package's namespace environment, so that tests can access internal functions and objects. } testthat/man/find_test_scripts.Rd0000644000176200001440000000234214164710002016676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{find_test_scripts} \alias{find_test_scripts} \title{Find test files} \usage{ find_test_scripts( path, filter = NULL, invert = FALSE, ..., full.names = TRUE, start_first = NULL ) } \arguments{ \item{path}{path to tests} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching is performed on the file name after it's stripped of \code{"test-"} and \code{".R"}.} \item{invert}{If \code{TRUE} return files which \strong{don't} match.} \item{...}{Additional arguments passed to \code{\link[=grepl]{grepl()}} to control filtering.} \item{start_first}{A character vector of file patterns (globs, see \code{\link[utils:glob2rx]{utils::glob2rx()}}). The patterns are for the file names (base names), not for the whole paths. 
testthat starts the files matching the first pattern first, then the ones matching the second, etc. and then the rest of the files, alphabetically. Parallel tests tend to finish quicker if you start the slowest files first. \code{NULL} means alphabetical order.} } \value{ A character vector of paths } \description{ Find test files } \keyword{internal} testthat/man/expect_known_output.Rd0000644000176200001440000000634615047715224017320 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-known.R \name{expect_known_output} \alias{expect_known_output} \alias{expect_known_value} \alias{expect_equal_to_reference} \alias{expect_known_hash} \title{Do you expect the results/output to equal a known value?} \usage{ expect_known_output( object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80 ) expect_known_value( object, file, update = TRUE, ..., info = NULL, label = NULL, version = 2 ) expect_known_hash(object, hash = NULL) } \arguments{ \item{file}{File path where known value/output will be stored.} \item{update}{Should the file be updated? Defaults to \code{TRUE}, with the expectation that you'll notice changes because of the first failure, and then see the modified files in git.} \item{...}{Passed on to \code{\link[waldo:compare]{waldo::compare()}}.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{print}{If \code{TRUE} and the result of evaluating \code{code} is visible, print the result using \code{testthat_print()}.} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} \item{version}{The serialization format version to use. The default, 2, was the default format from R 1.4.0 to 3.5.3. 
Version 3 became the default from R 3.6.0 and can only be read by R versions 3.5.0 and higher.} \item{hash}{Known hash value. Leave empty and you'll be informed what to use in the test output.} } \description{ For complex printed output and objects, it is often challenging to describe exactly what you expect to see. \code{expect_known_value()} and \code{expect_known_output()} provide a slightly weaker guarantee, simply asserting that the values have not changed since the last time that you ran them. } \details{ These expectations should be used in conjunction with git, as otherwise there is no way to revert to previous values. Git is particularly useful in conjunction with \code{expect_known_output()} as the diffs will show you exactly what has changed. Note that known values updates will only be updated when running tests interactively. \verb{R CMD check} clones the package source so any changes to the reference files will occur in a temporary directory, and will not be synchronised back to the source package. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_known_output()} and friends are deprecated in the 3rd edition; please use \code{\link[=expect_snapshot_output]{expect_snapshot_output()}} and friends instead. 
} \examples{ tmp <- tempfile() # The first run always succeeds expect_known_output(mtcars[1:10, ], tmp, print = TRUE) # Subsequent runs will succeed only if the file is unchanged # This will succeed: expect_known_output(mtcars[1:10, ], tmp, print = TRUE) \dontrun{ # This will fail expect_known_output(mtcars[1:9, ], tmp, print = TRUE) } } \keyword{internal} testthat/man/expect_snapshot.Rd0000644000176200001440000000764015054412736016401 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot.R \name{expect_snapshot} \alias{expect_snapshot} \title{Do you expect this code to run the same way as last time?} \usage{ expect_snapshot( x, cran = FALSE, error = FALSE, transform = NULL, variant = NULL, cnd_class = FALSE ) } \arguments{ \item{x}{Code to evaluate.} \item{cran}{Should these expectations be verified on CRAN? By default, they are not, because snapshot tests tend to be fragile because they often rely on minor details of dependencies.} \item{error}{Do you expect the code to throw an error? The expectation will fail (even on CRAN) if an unexpected error is thrown or the expected error is not thrown.} \item{transform}{Optionally, a function to scrub sensitive or stochastic text from the output. Should take a character vector of lines as input and return a modified character vector as output.} \item{variant}{If non-\code{NULL}, results will be saved in \verb{_snaps/\{variant\}/\{test.md\}}, so \code{variant} must be a single string suitable for use as a directory name. You can use variants to deal with cases where the snapshot output varies and you want to capture and test the variations. Common use cases include variations for operating system, R version, or version of key dependency. Variants are an advanced feature. 
When you use them, you'll need to carefully think about your testing strategy to ensure that all important variants are covered by automated tests, and ensure that you have a way to get snapshot changes out of your CI system and back into the repo. Note that there's no way to declare all possible variants up front which means that as soon as you start using variants, you are responsible for deleting snapshot variants that are no longer used. (testthat will still delete all variants if you delete the test.)} \item{cnd_class}{Whether to include the class of messages, warnings, and errors in the snapshot. Only the most specific class is included, i.e. the first element of \code{class(cnd)}.} } \description{ Snapshot tests (aka golden tests) are similar to unit tests except that the expected result is stored in a separate file that is managed by testthat. Snapshot tests are useful for when the expected value is large, or when the intent of the code is something that can only be verified by a human (e.g. this is a useful error message). Learn more in \code{vignette("snapshotting")}. \code{expect_snapshot()} runs code as if you had executed it at the console, and records the results, including output, messages, warnings, and errors. If you just want to compare the result, try \code{\link[=expect_snapshot_value]{expect_snapshot_value()}}. } \section{Workflow}{ The first time that you run a snapshot expectation it will run \code{x}, capture the results, and record them in \verb{tests/testthat/_snaps/\{test\}.md}. Each test file gets its own snapshot file, e.g. \code{test-foo.R} will get \verb{_snaps/foo.md}. It's important to review the Markdown files and commit them to git. They are designed to be human readable, and you should always review new additions to ensure that the salient information has been captured. They should also be carefully reviewed in pull requests, to make sure that snapshots have updated in the expected way. 
On subsequent runs, the result of \code{x} will be compared to the value stored on disk. If it's different, the expectation will fail, and a new file \verb{_snaps/\{test\}.new.md} will be created. If the change was deliberate, you can approve the change with \code{\link[=snapshot_accept]{snapshot_accept()}} and then the tests will pass the next time you run them. Note that snapshotting can only work when executing a complete test file (with \code{\link[=test_file]{test_file()}}, \code{\link[=test_dir]{test_dir()}}, or friends) because there's otherwise no way to figure out the snapshot path. If you run snapshot tests interactively, they'll just display the current value. } testthat/man/evaluate_promise.Rd0000644000176200001440000000136013171137773016532 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/evaluate-promise.R \name{evaluate_promise} \alias{evaluate_promise} \title{Evaluate a promise, capturing all types of output.} \usage{ evaluate_promise(code, print = FALSE) } \arguments{ \item{code}{Code to evaluate.} } \value{ A list containing \item{result}{The result of the function} \item{output}{A string containing all the output from the function} \item{warnings}{A character vector containing the text from each warning} \item{messages}{A character vector containing the text from each message} } \description{ Evaluate a promise, capturing all types of output. } \examples{ evaluate_promise({ print("1") message("2") warning("3") 4 }) } \keyword{internal} testthat/man/is_informative_error.Rd0000644000176200001440000000170115077746030017413 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprec-condition.R \name{is_informative_error} \alias{is_informative_error} \title{Is an error informative?} \usage{ is_informative_error(x, ...) 
} \arguments{ \item{x}{An error object.} \item{...}{These dots are for future extensions and must be empty.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{is_informative_error()} is a generic predicate that indicates whether testthat users should explicitly test for an error class. Since we no longer recommend you do that, this generic has been deprecated. } \details{ A few classes are hard-coded as uninformative: \itemize{ \item \code{simpleError} \item \code{rlang_error} unless a subclass is detected \item \code{Rcpp::eval_error} \item \code{Rcpp::exception} } } \keyword{internal} testthat/man/skip.Rd0000644000176200001440000001022615054412736014132 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/skip.R \name{skip} \alias{skip} \alias{skip_if_not} \alias{skip_if} \alias{skip_if_not_installed} \alias{skip_unless_r} \alias{skip_if_offline} \alias{skip_on_cran} \alias{local_on_cran} \alias{skip_on_os} \alias{skip_on_ci} \alias{skip_on_covr} \alias{skip_on_bioc} \alias{skip_if_translated} \title{Skip a test for various reasons} \usage{ skip(message = "Skipping") skip_if_not(condition, message = NULL) skip_if(condition, message = NULL) skip_if_not_installed(pkg, minimum_version = NULL) skip_unless_r(spec) skip_if_offline(host = "captive.apple.com") skip_on_cran() local_on_cran(on_cran = TRUE, frame = caller_env()) skip_on_os(os, arch = NULL) skip_on_ci() skip_on_covr() skip_on_bioc() skip_if_translated(msgid = "'\%s' not found") } \arguments{ \item{message}{A message describing why the test was skipped.} \item{condition}{Boolean condition to check. 
\code{skip_if_not()} will skip if \code{FALSE}, \code{skip_if()} will skip if \code{TRUE}.} \item{pkg}{Name of package to check for} \item{minimum_version}{Minimum required version for the package} \item{spec}{A version specification like '>= 4.1.0' denoting that this test should only be run on R versions 4.1.0 and later.} \item{host}{A string with a hostname to lookup} \item{on_cran}{Pretend we're on CRAN (\code{TRUE}) or not (\code{FALSE}).} \item{frame}{Calling frame to tie change to; expect use only.} \item{os}{Character vector of one or more operating systems to skip on. Supported values are \code{"windows"}, \code{"mac"}, \code{"linux"}, \code{"solaris"}, and \code{"emscripten"}.} \item{arch}{Character vector of one or more architectures to skip on. Common values include \code{"i386"} (32 bit), \code{"x86_64"} (64 bit), and \code{"aarch64"} (M1 mac). Supplying \code{arch} makes the test stricter; i.e. both \code{os} and \code{arch} must match in order for the test to be skipped.} \item{msgid}{R message identifier used to check for translation: the default uses a message included in most translation packs. See the complete list in \href{https://github.com/wch/r-source/blob/master/src/library/base/po/R-base.pot}{\code{R-base.pot}}.} } \description{ \code{skip_if()} and \code{skip_if_not()} allow you to skip tests, immediately concluding a \code{\link[=test_that]{test_that()}} block without executing any further expectations. This allows you to skip a test without failure, if for some reason it can't be run (e.g. it depends on the feature of a specific operating system, or it requires a specific version of a package). See \code{vignette("skipping")} for more details. } \section{Helpers}{ \itemize{ \item \code{skip_if_not_installed("pkg")} skips tests if package "pkg" is not installed or cannot be loaded (using \code{requireNamespace()}). 
Generally, you can assume that suggested packages are installed, and you do not need to check for them specifically, unless they are particularly difficult to install. \item \code{skip_if_offline()} skips if an internet connection is not available (using \code{\link[curl:nslookup]{curl::nslookup()}}) or if the test is run on CRAN. Requires \{curl\} to be installed and included in the dependencies of your package. \item \code{skip_if_translated("msg")} skips tests if the "msg" is translated. \item \code{skip_on_bioc()} skips on Bioconductor (using the \code{IS_BIOC_BUILD_MACHINE} env var). \item \code{skip_on_cran()} skips on CRAN (using the \code{NOT_CRAN} env var set by devtools and friends). \code{local_on_cran()} gives you the ability to easily simulate what will happen on CRAN. \item \code{skip_on_covr()} skips when covr is running (using the \code{R_COVR} env var). \item \code{skip_on_ci()} skips on continuous integration systems like GitHub Actions, travis, and appveyor (using the \code{CI} env var). \item \code{skip_on_os()} skips on the specified operating system(s) ("windows", "mac", "linux", or "solaris"). } } \examples{ if (FALSE) skip("Some Important Requirement is not available") test_that("skip example", { expect_equal(1, 1L) # this expectation runs skip('skip') expect_equal(1, 2) # this one skipped expect_equal(1, 3) # this one is also skipped }) } testthat/man/expect_is.Rd0000644000176200001440000000243515047715224015152 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-inheritance.R \name{expect_is} \alias{expect_is} \title{Do you expect to inherit from this class?} \usage{ expect_is(object, class, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. 
See \link{quasi_label} for more details.} \item{class}{Class name passed to \code{inherits()}.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{expect_is()} is an older form that uses \code{\link[=inherits]{inherits()}} without checking whether \code{x} is S3, S4, or neither. Instead, I'd recommend using \code{\link[=expect_type]{expect_type()}}, \code{\link[=expect_s3_class]{expect_s3_class()}}, or \code{\link[=expect_s4_class]{expect_s4_class()}} to more clearly convey your intent. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_is()} is formally deprecated in the 3rd edition. } \keyword{internal} testthat/man/test_file.Rd0000644000176200001440000000304415054053615015137 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{test_file} \alias{test_file} \title{Run tests in a single file} \usage{ test_file( path, reporter = default_compact_reporter(), desc = NULL, package = NULL, shuffle = FALSE, ... ) } \arguments{ \item{path}{Path to file.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{desc}{Optionally, supply a string here to run only a single test (\code{test_that()} or \code{describe()}) with this \code{desc}ription.} \item{package}{If these tests belong to a package, the name of the package.} \item{shuffle}{If \code{TRUE}, randomly reorder the top-level expressions in the file.} \item{...}{Additional parameters passed on to \code{test_dir()}} } \value{ A list (invisibly) containing data about the test results. 
} \description{ Helper, setup, and teardown files located in the same directory as the test will also be run. See \code{vignette("special-files")} for details. } \section{Environments}{ Each test is run in a clean environment to keep tests as isolated as possible. For package tests, that environment inherits from the package's namespace environment, so that tests can access internal functions and objects. } \examples{ path <- testthat_example("success") test_file(path) test_file(path, desc = "some tests have warnings") test_file(path, reporter = "minimal") } testthat/man/extract_test.Rd0000644000176200001440000000255315104642045015673 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract.R \name{extract_test} \alias{extract_test} \title{Extract a reprex from a failed expectation} \usage{ extract_test(location, path = stdout(), package = Sys.getenv("TESTTHAT_PKG")) } \arguments{ \item{location}{A string giving the location in the form \verb{FILE:LINE[:COLUMN]}.} \item{path}{Path to write the reprex to. Defaults to \code{stdout()}.} \item{package}{If supplied, will be used to construct a test environment for the extracted code.} } \value{ This function is called for its side effect of rendering a reprex to \code{path}. This function will never error: if extraction fails, the error message will be written to \code{path}. } \description{ \code{extract_test()} creates a minimal reprex for a failed expectation. It extracts all non-test code before the failed expectation as well as all code inside the test up to and including the failed expectation. This is particularly useful when you're debugging test failures in someone else's package. } \examples{ # If you see a test failure like this: # -- Failure (test-extract.R:46:3): errors if can't find test ------------- # Expected FALSE to be TRUE. 
# Differences: # `actual`: FALSE # `expected`: TRUE # You can run this: \dontrun{extract_test("test-extract.R:46:3")} # to see just the code needed to reproduce the failure } testthat/man/try_again.Rd0000644000176200001440000000151415104404205015125 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/try-again.R \name{try_again} \alias{try_again} \title{Evaluate an expectation multiple times until it succeeds} \usage{ try_again(times, code) } \arguments{ \item{times}{Number of times to retry.} \item{code}{Code to evaluate.} } \description{ If you have a flaky test, you can use \code{try_again()} to run it a few times until it succeeds. In most cases, you are better fixing the underlying cause of the flakeyness, but sometimes that's not possible. } \examples{ usually_return_1 <- function(i) { if (runif(1) < 0.1) 0 else 1 } \dontrun{ # 10\% chance of failure: expect_equal(usually_return_1(), 1) # 1\% chance of failure: try_again(1, expect_equal(usually_return_1(), 1)) # 0.1\% chance of failure: try_again(2, expect_equal(usually_return_1(), 1)) } } testthat/man/Reporter.Rd0000644000176200001440000000303115127561732014764 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter.R \name{Reporter} \alias{Reporter} \title{Manage test reporting} \description{ The job of a reporter is to aggregate the results from files, tests, and expectations and display them in an informative way. Every testthat function that runs multiple tests provides a \code{reporter} argument which you can use to override the default (which is selected by \code{\link[=default_reporter]{default_reporter()}}). } \details{ You only need to use this \code{Reporter} object directly if you are creating a new reporter. 
Currently, creating new Reporters is undocumented, so if you want to create your own, you'll need to make sure that you're familiar with \href{https://adv-r.hadley.nz/r6.html}{R6} and then need read the source code for a few. } \examples{ path <- testthat_example("success") test_file(path) # Override the default by supplying the name of a reporter test_file(path, reporter = "minimal") } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} \keyword{internal} testthat/man/reporter-accessors.Rd0000644000176200001440000000217514164710002017001 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{reporter-accessors} \alias{reporter-accessors} \alias{set_reporter} \alias{get_reporter} \alias{with_reporter} \title{Get and set active reporter.} \usage{ set_reporter(reporter) get_reporter() with_reporter(reporter, code, start_end_reporter = TRUE) } \arguments{ \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). See \link{Reporter} for more details and a list of built-in reporters.} \item{code}{Code to execute.} \item{start_end_reporter}{Should the reporters \code{start_reporter()} and \code{end_reporter()} methods be called? For expert use only.} } \value{ \code{with_reporter()} invisible returns the reporter active when \code{code} was evaluated. 
} \description{ \code{get_reporter()} and \code{set_reporter()} access and modify the current "active" reporter. Generally, these functions should not be called directly; instead use \code{with_reporter()} to temporarily change, then reset, the active reporter. } \keyword{internal} testthat/man/StopReporter.Rd0000644000176200001440000000170615127561732015641 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-stop.R \name{StopReporter} \alias{StopReporter} \title{Error if any test fails} \description{ The default reporter used when \code{\link[=expect_that]{expect_that()}} is run interactively. It responds by displaying a summary of the number of successes and failures and \code{\link[=stop]{stop()}}ping on if there are any failures. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/teardown.Rd0000644000176200001440000000253014164710002014772 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/teardown.R \name{teardown} \alias{teardown} \alias{setup} \title{Run code before/after tests} \usage{ teardown(code, env = parent.frame()) setup(code, env = parent.frame()) } \arguments{ \item{code}{Code to evaluate} \item{env}{Environment in which code will be evaluated. 
For expert use only.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} We no longer recommend using \code{setup()} and \code{teardown()}; instead we think it's better practice to use a \strong{test fixture} as described in \code{vignette("test-fixtures")}. Code in a \code{setup()} block is run immediately in a clean environment. Code in a \code{teardown()} block is run upon completion of a test file, even if it exits with an error. Multiple calls to \code{teardown()} will be executed in the order they were created. } \examples{ \dontrun{ # Old approach tmp <- tempfile() setup(writeLines("some test data", tmp)) teardown(unlink(tmp)) } # Now recommended: local_test_data <- function(env = parent.frame()) { tmp <- tempfile() writeLines("some test data", tmp) withr::defer(unlink(tmp), env) tmp } # Then call local_test_data() in your tests } \keyword{internal} testthat/man/compare.Rd0000644000176200001440000000525615040747537014626 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare.R \name{compare} \alias{compare} \alias{compare.default} \alias{compare.character} \alias{compare.numeric} \alias{compare.POSIXt} \title{Provide human-readable comparison of two objects} \usage{ compare(x, y, ...) 
\method{compare}{default}(x, y, ..., max_diffs = 9) \method{compare}{character}( x, y, check.attributes = TRUE, ..., max_diffs = 5, max_lines = 5, width = cli::console_width() ) \method{compare}{numeric}( x, y, tolerance = testthat_tolerance(), check.attributes = TRUE, ..., max_diffs = 9 ) \method{compare}{POSIXt}(x, y, tolerance = 0.001, ..., max_diffs = 9) } \arguments{ \item{x, y}{Objects to compare} \item{...}{Additional arguments used to control specifics of comparison} \item{max_diffs}{Maximum number of differences to show} \item{check.attributes}{If \code{TRUE}, also checks values of attributes.} \item{max_lines}{Maximum number of lines to show from each difference} \item{width}{Width of output device} \item{tolerance}{Numerical tolerance: any differences (in the sense of \code{\link[base:all.equal]{base::all.equal()}}) smaller than this value will be ignored. The default tolerance is \code{sqrt(.Machine$double.eps)}, unless long doubles are not available, in which case the test is skipped.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \code{compare} is similar to \code{\link[base:all.equal]{base::all.equal()}}, but somewhat buggy in its use of \code{tolerance}. Please use \href{https://waldo.r-lib.org/}{waldo} instead. } \examples{ # Character ----------------------------------------------------------------- x <- c("abc", "def", "jih") compare(x, x) y <- paste0(x, "y") compare(x, y) compare(letters, paste0(letters, "-")) x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar dolor. 
Donec pretium iaculis nulla, ut interdum sapien ultricies a. " compare(x, y) compare(c(x, x), c(y, y)) # Numeric ------------------------------------------------------------------- x <- y <- runif(100) y[sample(100, 10)] <- 5 compare(x, y) x <- y <- 1:10 x[5] <- NA x[6] <- 6.5 compare(x, y) # Compare ignores minor numeric differences in the same way # as all.equal. compare(x, x + 1e-9) } \keyword{internal} testthat/man/DebugReporter.Rd0000644000176200001440000000152115130037147015725 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-debug.R \name{DebugReporter} \alias{DebugReporter} \title{Interactively debug failing tests} \description{ This reporter will call a modified version of \code{\link[=recover]{recover()}} on all broken expectations. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/with_mock.Rd0000644000176200001440000000212615054053615015145 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mock.R \name{with_mock} \alias{with_mock} \alias{local_mock} \title{Mock functions in a package.} \usage{ with_mock(..., .env = topenv()) local_mock(..., .env = topenv(), .local_envir = parent.frame()) } \arguments{ \item{...}{named parameters redefine mocked functions, unnamed parameters will be evaluated after mocking the functions} \item{.env}{the environment in which to patch the functions, defaults to the top-level environment. 
A character is interpreted as package name.} \item{.local_envir}{Environment in which to add exit handler. For expert use only.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#defunct}{\figure{lifecycle-defunct.svg}{options: alt='[Defunct]'}}}{\strong{[Defunct]}} \code{with_mock()} and \code{local_mock()} are now defunct, and can be replaced by \code{\link[=with_mocked_bindings]{with_mocked_bindings()}} and \code{\link[=local_mocked_bindings]{local_mocked_bindings()}}. These functions only worked by abusing of R's internals. } \keyword{internal} testthat/man/watch.Rd0000644000176200001440000000206415072252215014265 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{watch} \alias{watch} \title{Watch a directory for changes (additions, deletions & modifications).} \usage{ watch(path, callback, pattern = NULL, hash = TRUE) } \arguments{ \item{path}{character vector of paths to watch. Omit trailing backslash.} \item{callback}{function called every time a change occurs. It should have three parameters: added, deleted, modified, and should return \code{TRUE} to keep watching, or \code{FALSE} to stop.} \item{pattern}{file pattern passed to \code{\link[=dir]{dir()}}} \item{hash}{hashes are more accurate at detecting changes, but are slower for large files. When \code{FALSE}, uses modification time stamps} } \description{ This is used to power the \code{\link[=auto_test]{auto_test()}} and \code{\link[=auto_test_package]{auto_test_package()}} functions which are used to rerun tests whenever source code changes. } \details{ Use Ctrl + break (windows), Esc (mac gui) or Ctrl + C (command line) to stop the watcher. 
} \keyword{internal} testthat/man/equality-expectations.Rd0000644000176200001440000000664515054053615017534 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-equality.R \name{equality-expectations} \alias{equality-expectations} \alias{expect_equal} \alias{expect_identical} \title{Do you expect this value?} \usage{ expect_equal( object, expected, ..., tolerance = if (edition_get() >= 3) testthat_tolerance(), info = NULL, label = NULL, expected.label = NULL ) expect_identical( object, expected, info = NULL, label = NULL, expected.label = NULL, ... ) } \arguments{ \item{object, expected}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{...}{\strong{3e}: passed on to \code{\link[waldo:compare]{waldo::compare()}}. See its docs to see other ways to control comparison. \strong{2e}: passed on to \code{\link[=compare]{compare()}}/\code{\link[=identical]{identical()}}.} \item{tolerance}{\strong{3e}: passed on to \code{\link[waldo:compare]{waldo::compare()}}. If non-\code{NULL}, will ignore small floating point differences. It uses same algorithm as \code{\link[=all.equal]{all.equal()}} so the tolerance is usually relative (i.e. \verb{mean(abs(x - y) / mean(abs(y)) < tolerance}), except when the differences are very small, when it becomes absolute (i.e. \verb{mean(abs(x - y) < tolerance}). See waldo documentation for more details. \strong{2e}: passed on to \code{\link[=compare]{compare()}}, if set. It's hard to reason about exactly what tolerance means because depending on the precise code path it could be either an absolute or relative tolerance.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. 
Instead see alternatives in \link{quasi_label}.} \item{label, expected.label}{Used to customise failure messages. For expert use only.} } \description{ These functions provide two levels of strictness when comparing a computation to a reference value. \code{expect_identical()} is the baseline; \code{expect_equal()} relaxes the test to ignore small numeric differences. In the 2nd edition, \code{expect_identical()} uses \code{\link[=identical]{identical()}} and \code{expect_equal} uses \code{\link[=all.equal]{all.equal()}}. In the 3rd edition, both functions use \href{https://github.com/r-lib/waldo}{waldo}. They differ only in that \code{expect_equal()} sets \code{tolerance = testthat_tolerance()} so that small floating point differences are ignored; this also implies that (e.g.) \code{1} and \code{1L} are treated as equal. } \examples{ a <- 10 expect_equal(a, 10) # Use expect_equal() when testing for numeric equality \dontrun{ expect_identical(sqrt(2) ^ 2, 2) } expect_equal(sqrt(2) ^ 2, 2) } \seealso{ \itemize{ \item \code{\link[=expect_setequal]{expect_setequal()}}/\code{\link[=expect_mapequal]{expect_mapequal()}} to test for set equality. \item \code{\link[=expect_reference]{expect_reference()}} to test if two names point to same memory address. 
} Other expectations: \code{\link{comparison-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/SlowReporter.Rd0000644000176200001440000000347215127561732015642 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-slow.R, R/reporter-timing.R \name{SlowReporter} \alias{SlowReporter} \title{Find slow tests} \description{ \code{SlowReporter} is designed to identify slow tests. It reports the execution time for each test and can optionally filter out tests that run faster than a specified threshold (default: 1 second). This reporter is useful for performance optimization and identifying tests that may benefit from optimization or parallelization. \code{SlowReporter} is designed to identify slow tests. It reports the execution time for each test, ignoring tests faster than a specified threshold (default: 0.5s). The easiest way to run it over your package is with \code{devtools::test(reporter = "slow")}. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/MinimalReporter.Rd0000644000176200001440000000201215127561732016271 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-minimal.R \name{MinimalReporter} \alias{MinimalReporter} \title{Report minimal results as compactly as possible} \description{ The minimal test reporter provides the absolutely minimum amount of information: whether each expectation has succeeded, failed or experienced an error. If you want to find out what the failures and errors actually were, you'll need to run a more informative test reporter. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/takes_less_than.Rd0000644000176200001440000000060014164710002016312 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{takes_less_than} \alias{takes_less_than} \title{Does code take less than the expected amount of time to run?} \usage{ takes_less_than(amount) } \arguments{ \item{amount}{maximum duration in seconds} } \description{ This is useful for performance regression testing. } \keyword{internal} testthat/man/SummaryReporter.Rd0000644000176200001440000000213715127561732016350 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-summary.R \name{SummaryReporter} \alias{SummaryReporter} \title{Report a summary of failures} \description{ This is designed for interactive usage: it lets you know which tests have run successfully and as well as fully reporting information about failures and errors. You can use the \code{max_reports} field to control the maximum number of detailed reports produced by this reporter. As an additional benefit, this reporter will praise you from time-to-time if all your tests pass. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/FailReporter.Rd0000644000176200001440000000155415127561732015570 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-fail.R \name{FailReporter} \alias{FailReporter} \title{Fail if any tests fail} \description{ This reporter will simply throw an error if any of the tests failed. It is best combined with another reporter, such as the \link{SummaryReporter}. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/topic-name.Rd0000644000176200001440000000101615104641761015213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/extract.R \name{simulate_test_env} \alias{simulate_test_env} \title{Simulate a test environment} \usage{ simulate_test_env(package, path) } \arguments{ \item{package}{Name of installed package.} \item{path}{Path to \code{tests/testthat}.} } \description{ This function is designed to allow you to simulate 
testthat's testing environment in an interactive session. To undo it's affect, you will need to restart your R session. } \keyword{internal} testthat/man/testthat-package.Rd0000644000176200001440000000225715040747537016427 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/testthat-package.R \docType{package} \name{testthat-package} \alias{testthat} \alias{testthat-package} \title{An R package to make testing fun!} \description{ Try the example below. Have a look at the references and learn more from function documentation such as \code{\link[=test_that]{test_that()}}. } \section{Options}{ \itemize{ \item \code{testthat.use_colours}: Should the output be coloured? (Default: \code{TRUE}). \item \code{testthat.summary.max_reports}: The maximum number of detailed test reports printed for the summary reporter (default: 10). \item \code{testthat.summary.omit_dots}: Omit progress dots in the summary reporter (default: \code{FALSE}). } } \seealso{ Useful links: \itemize{ \item \url{https://testthat.r-lib.org} \item \url{https://github.com/r-lib/testthat} \item Report bugs at \url{https://github.com/r-lib/testthat/issues} } } \author{ \strong{Maintainer}: Hadley Wickham \email{hadley@posit.co} Other contributors: \itemize{ \item Posit Software, PBC [copyright holder, funder] \item R Core team (Implementation of utils::recover()) [contributor] } } \keyword{internal} testthat/man/expect_match.Rd0000644000176200001440000000554115054053615015631 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-match.R \name{expect_match} \alias{expect_match} \alias{expect_no_match} \title{Do you expect a string to match this pattern?} \usage{ expect_match( object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL ) expect_no_match( object, regexp, perl = FALSE, fixed = FALSE, ..., all = TRUE, info = NULL, label = NULL ) } \arguments{ \item{object}{Object to test. 
Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against.} \item{perl}{logical. Should Perl-compatible regexps be used?} \item{fixed}{If \code{TRUE}, treats \code{regexp} as a string to be matched exactly (not a regular expressions). Overrides \code{perl}.} \item{...}{ Arguments passed on to \code{\link[base:grep]{base::grepl}} \describe{ \item{\code{ignore.case}}{logical. if \code{FALSE}, the pattern matching is \emph{case sensitive} and if \code{TRUE}, case is ignored during matching.} \item{\code{useBytes}}{logical. If \code{TRUE} the matching is done byte-by-byte rather than character-by-character. See \sQuote{Details}.} }} \item{all}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE).} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ Do you expect a string to match this pattern? } \details{ \code{expect_match()} checks if a character vector matches a regular expression, powered by \code{\link[=grepl]{grepl()}}. \code{expect_no_match()} provides the complementary case, checking that a character vector \emph{does not} match a regular expression. } \section{Functions}{ \itemize{ \item \code{expect_no_match()}: Check that a string doesn't match a regular expression. 
}} \examples{ expect_match("Testing is fun", "fun") expect_match("Testing is fun", "f.n") expect_no_match("Testing is fun", "horrible") show_failure(expect_match("Testing is fun", "horrible")) show_failure(expect_match("Testing is fun", "horrible", fixed = TRUE)) # Zero-length inputs always fail show_failure(expect_match(character(), ".")) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/TeamcityReporter.Rd0000644000176200001440000000166315127561732016475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-teamcity.R \name{TeamcityReporter} \alias{TeamcityReporter} \title{Report results in Teamcity format} \description{ This reporter will output results in the Teamcity message format. 
For more information about Teamcity messages, see http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}} } \concept{reporters} testthat/man/compare_state.Rd0000644000176200001440000000070713051613152016003 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{compare_state} \alias{compare_state} \title{Compare two directory states.} \usage{ compare_state(old, new) } \arguments{ \item{old}{previous state} \item{new}{current state} } \value{ list containing number of changes and files which have been \code{added}, \code{deleted} and \code{modified} } \description{ Compare two directory states. } \keyword{internal} testthat/man/teardown_env.Rd0000644000176200001440000000077714164710002015655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-files.R \name{teardown_env} \alias{teardown_env} \title{Run code after all test files} \usage{ teardown_env() } \description{ This environment has no purpose other than as a handle for \code{\link[withr:defer]{withr::defer()}}: use it when you want to run code after all tests have been run. Typically, you'll use \code{withr::defer(cleanup(), teardown_env())} immediately after you've made a mess in a \verb{setup-*.R} file. 
} testthat/man/expect_no_error.Rd0000644000176200001440000000462315047715224016365 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-no-condition.R \name{expect_no_error} \alias{expect_no_error} \alias{expect_no_warning} \alias{expect_no_message} \alias{expect_no_condition} \title{Do you expect the absence of errors, warnings, messages, or other conditions?} \usage{ expect_no_error(object, ..., message = NULL, class = NULL) expect_no_warning(object, ..., message = NULL, class = NULL) expect_no_message(object, ..., message = NULL, class = NULL) expect_no_condition(object, ..., message = NULL, class = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{...}{These dots are for future extensions and must be empty.} \item{message, class}{The default, \verb{message = NULL, class = NULL}, will fail if there is any error/warning/message/condition. In many cases, particularly when testing warnings and messages, you will want to be more specific about the condition you are hoping \strong{not} to see, i.e. the condition that motivated you to write the test. Similar to \code{expect_error()} and friends, you can specify the \code{message} (a regular expression that the message of the condition must match) and/or the \code{class} (a class the condition must inherit from). This ensures that the message/warnings you don't want never recur, while allowing new messages/warnings to bubble up for you to deal with. Note that you should only use \code{message} with errors/warnings/messages that you generate, or that base R generates (which tend to be stable). Avoid tests that rely on the specific text generated by another package since this can easily change. 
If you do need to test text generated by another package, either protect the test with \code{skip_on_cran()} or use \code{expect_snapshot()}.} } \description{ These expectations are the opposite of \code{\link[=expect_error]{expect_error()}}, \code{expect_warning()}, \code{expect_message()}, and \code{expect_condition()}. They assert the absence of an error, warning, or message, respectively. } \examples{ expect_no_warning(1 + 1) foo <- function(x) { warning("This is a problem!") } # warning doesn't match so bubbles up: expect_no_warning(foo(), message = "bananas") # warning does match so causes a failure: try(expect_no_warning(foo(), message = "problem")) } testthat/man/LocationReporter.Rd0000644000176200001440000000166315127561732016466 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-location.R \name{LocationReporter} \alias{LocationReporter} \title{Test reporter: location} \description{ This reporter simply prints the location of every expectation and error. 
This is useful if you're trying to figure out the source of a segfault, or you want to figure out which code triggers a C/C++ breakpoint } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/expect_success.Rd0000644000176200001440000000171315072252215016177 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-self-test.R \name{expect_success} \alias{expect_success} \alias{expect_failure} \alias{expect_snapshot_failure} \alias{show_failure} \title{Test your custom expectations} \usage{ expect_success(expr) expect_failure(expr, message = NULL, ...) expect_snapshot_failure(expr) show_failure(expr) } \arguments{ \item{expr}{Code to evaluate} \item{message}{Check that the failure message matches this regexp.} \item{...}{Other arguments passed on to \code{\link[=expect_match]{expect_match()}}.} } \description{ \code{expect_success()} checks that there's exactly one success and no failures; \code{expect_failure()} checks that there's exactly one failure and no successes. \code{expect_snapshot_failure()} records the failure message so that you can manually check that it is informative. Use \code{show_failure()} in examples to print the failure message without throwing an error. 
} testthat/man/expect_silent.Rd0000644000176200001440000000210415054053615016023 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-silent.R \name{expect_silent} \alias{expect_silent} \title{Do you expect code to execute silently?} \usage{ expect_silent(object) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} } \value{ The first argument, invisibly. } \description{ Checks that the code produces no output, messages, or warnings. } \examples{ expect_silent("123") f <- function() { message("Hi!") warning("Hey!!") print("OY!!!") } \dontrun{ expect_silent(f()) } } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/verify_output.Rd0000644000176200001440000000527015040747537016120 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/verify-output.R \name{verify_output} \alias{verify_output} \title{Verify output} \usage{ verify_output( path, code, width = 80, crayon = FALSE, unicode = FALSE, env = caller_env() ) } \arguments{ \item{path}{Path to record results. This should usually be a call to \code{\link[=test_path]{test_path()}} in order to ensure that the same path is used when run interactively (when the working directory is typically the project root), and when run as an automated test (when the working directory will be \code{tests/testthat}).} \item{code}{Code to execute. 
This will usually be a multiline expression contained within \code{{}} (similarly to \code{test_that()} calls).} \item{width}{Width of console output} \item{crayon}{Enable cli/crayon package colouring?} \item{unicode}{Enable cli package UTF-8 symbols? If you set this to \code{TRUE}, call \code{skip_if(!cli::is_utf8_output())} to disable the test on your CI platforms that don't support UTF-8 (e.g. Windows).} \item{env}{The environment to evaluate \code{code} in.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} This function is superseded in favour of \code{expect_snapshot()} and friends. This is a regression test that records interwoven code and output into a file, in a similar way to knitting an \code{.Rmd} file (but see caveats below). \code{verify_output()} is designed particularly for testing print methods and error messages, where the primary goal is to ensure that the output is helpful to a human. Obviously, you can't test that with code, so the best you can do is make the results explicit by saving them to a text file. This makes the output easy to verify in code reviews, and ensures that you don't change the output by accident. \code{verify_output()} is designed to be used with git: to see what has changed from the previous run, you'll need to use \verb{git diff} or similar. } \section{Syntax}{ \code{verify_output()} can only capture the abstract syntax tree, losing all whitespace and comments. To mildly offset this limitation: \itemize{ \item Strings are converted to R comments in the output. \item Strings starting with \verb{# } are converted to headers in the output. } } \section{CRAN}{ On CRAN, \code{verify_output()} will never fail, even if the output changes. 
This avoids false positives because tests of print methods and error messages are often fragile due to implicit dependencies on other packages, and failure does not imply incorrect computation, just a change in presentation. } \keyword{internal} testthat/man/test_examples.Rd0000644000176200001440000000167014164710002016030 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-example.R \name{test_examples} \alias{test_examples} \alias{test_rd} \alias{test_example} \title{Test package examples} \usage{ test_examples(path = "../..") test_rd(rd, title = attr(rd, "Rdfile")) test_example(path, title = path) } \arguments{ \item{path}{For \code{test_examples()}, path to directory containing Rd files. For \code{test_example()}, path to a single Rd file. Remember the working directory for tests is \code{tests/testthat}.} \item{rd}{A parsed Rd object, obtained from \code{\link[tools:Rdutils]{tools::Rd_db()}} or otherwise.} \item{title}{Test title to use} } \description{ These helper functions make it easier to test the examples in a package. Each example counts as one test, and it succeeds if the code runs without an error. Generally, this is redundant with R CMD check, and is not recommended in routine practice. } \keyword{internal} testthat/man/not.Rd0000644000176200001440000000066315047715224013770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{not} \alias{not} \title{Negate an expectation} \usage{ not(f) } \arguments{ \item{f}{an existing expectation function} } \description{ This negates an expectation, making it possible to express that you want the opposite of a standard expectation. This function is deprecated and will be removed in a future version. 
} \keyword{internal} testthat/man/capture_output.Rd0000644000176200001440000000260314164710002016233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/capture-output.R \name{capture_output} \alias{capture_output} \alias{capture_output_lines} \alias{testthat_print} \title{Capture output to console} \usage{ capture_output(code, print = FALSE, width = 80) capture_output_lines(code, print = FALSE, width = 80) testthat_print(x) } \arguments{ \item{code}{Code to evaluate.} \item{print}{If \code{TRUE} and the result of evaluating \code{code} is visible, print the result using \code{testthat_print()}.} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} } \value{ \code{capture_output()} returns a single string. \code{capture_output_lines()} returns a character vector with one entry for each line } \description{ Evaluates \code{code} in a special context in which all output is captured, similar to \code{\link[=capture.output]{capture.output()}}. } \details{ Results are printed using the \code{testthat_print()} generic, which defaults to \code{print()}, giving you the ability to customise the printing of your object in tests, if needed. } \examples{ capture_output({ cat("Hi!\n") cat("Bye\n") }) capture_output_lines({ cat("Hi!\n") cat("Bye\n") }) capture_output("Hi") capture_output("Hi", print = TRUE) } \keyword{internal} testthat/man/reexports.Rd0000644000176200001440000000061714164710002015206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \docType{import} \name{reexports} \alias{reexports} \alias{\%>\%} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. 
\describe{ \item{magrittr}{\code{\link[magrittr:pipe]{\%>\%}}} }} testthat/man/LlmReporter.Rd0000644000176200001440000000215015127561732015432 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-llm.R \name{LlmReporter} \alias{LlmReporter} \title{Report test progress for LLMs} \description{ \code{LlmReporter} is designed for use with Large Language Models (LLMs). It reports problems (warnings, skips, errors, and failures) as they occur and the total number of successes at the end. \code{LlmReporter} is used by default when tests are run by a coding agent. Currently we detect Claude Code, Cursor, and Gemini CLI. If using another tool, configure it to set env var \code{AGENT=1}. } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/find_reporter.Rd0000644000176200001440000000077213051613152016021 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-zzz.R \name{find_reporter} \alias{find_reporter} \title{Find reporter object given name or object.} \usage{ find_reporter(reporter) } \arguments{ \item{reporter}{name of reporter(s), or reporter object(s)} } \description{ If not found, will return informative error message. Pass a character vector to create a \link{MultiReporter} composed of individual reporters. Will return null if given NULL. 
} \keyword{internal} testthat/man/mock_output_sequence.Rd0000644000176200001440000000234315047715224017426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mock2-helpers.R \name{mock_output_sequence} \alias{mock_output_sequence} \title{Mock a sequence of output from a function} \usage{ mock_output_sequence(..., recycle = FALSE) } \arguments{ \item{...}{<\code{\link[rlang:dyn-dots]{dynamic-dots}}> Values to return in sequence.} \item{recycle}{whether to recycle. If \code{TRUE}, once all values have been returned, they will be returned again in sequence.} } \value{ A function that you can use within \code{local_mocked_bindings()} and \code{with_mocked_bindings()} } \description{ Specify multiple return values for mocking } \examples{ # inside local_mocked_bindings() \dontrun{ local_mocked_bindings(readline = mock_output_sequence("3", "This is a note", "n")) } # for understanding mocked_sequence <- mock_output_sequence("3", "This is a note", "n") mocked_sequence() mocked_sequence() mocked_sequence() try(mocked_sequence()) recycled_mocked_sequence <- mock_output_sequence( "3", "This is a note", "n", recycle = TRUE ) recycled_mocked_sequence() recycled_mocked_sequence() recycled_mocked_sequence() recycled_mocked_sequence() } \seealso{ Other mocking: \code{\link{local_mocked_bindings}()} } \concept{mocking} testthat/man/oldskool.Rd0000644000176200001440000000266615047715224015023 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/old-school.R \name{oldskool} \alias{oldskool} \alias{is_a} \alias{has_names} \alias{is_less_than} \alias{is_more_than} \alias{equals} \alias{is_equivalent_to} \alias{is_identical_to} \alias{equals_reference} \alias{shows_message} \alias{gives_warning} \alias{prints_text} \alias{throws_error} \title{Old-style expectations.} \usage{ is_a(class) has_names(expected, ignore.order = FALSE, ignore.case = FALSE) is_less_than(expected, label = NULL, ...) 
is_more_than(expected, label = NULL, ...) equals(expected, label = NULL, ...) is_equivalent_to(expected, label = NULL) is_identical_to(expected, label = NULL) equals_reference(file, label = NULL, ...) shows_message(regexp = NULL, all = FALSE, ...) gives_warning(regexp = NULL, all = FALSE, ...) prints_text(regexp = NULL, ...) throws_error(regexp = NULL, ...) } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} Initial testthat used a style of testing that looked like \verb{expect_that(a, equals(b)))} this allowed expectations to read like English sentences, but was verbose and a bit too cutesy. This style will continue to work but has been soft-deprecated - it is no longer documented, and new expectations will only use the new style \code{expect_equal(a, b)}. } \keyword{internal} testthat/man/local_mocked_r6_class.Rd0000644000176200001440000000164315072252215017371 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mock-oo.R \name{local_mocked_r6_class} \alias{local_mocked_r6_class} \title{Mock an R6 class} \usage{ local_mocked_r6_class( class, public = list(), private = list(), frame = caller_env() ) } \arguments{ \item{class}{An R6 class definition.} \item{public, private}{A named list of public and private methods/data.} \item{frame}{Calling frame which determines the scope of the mock. Only needed when wrapping in another local helper.} } \description{ This function allows you to temporarily override an R6 class definition. It works by creating a subclass then using \code{\link[=local_mocked_bindings]{local_mocked_bindings()}} to temporarily replace the original definition. This means that it will not affect subclasses of the original class; please file an issue if you need this. Learn more about mocking in \code{vignette("mocking")}. 
} testthat/man/local_mocked_bindings.Rd0000644000176200001440000001165015072252215017451 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mock2.R \name{local_mocked_bindings} \alias{local_mocked_bindings} \alias{with_mocked_bindings} \title{Temporarily redefine function definitions} \usage{ local_mocked_bindings(..., .package = NULL, .env = caller_env()) with_mocked_bindings(code, ..., .package = NULL) } \arguments{ \item{...}{Name-value pairs providing new values (typically functions) to temporarily replace the named bindings.} \item{.package}{The name of the package where mocked functions should be inserted. Generally, you should not supply this as it will be automatically detected when whole package tests are run or when there's one package under active development (i.e. loaded with \code{\link[pkgload:load_all]{pkgload::load_all()}}). We don't recommend using this to mock functions in other packages, as you should not modify namespaces that you don't own.} \item{.env}{Environment that defines effect scope. For expert use only.} \item{code}{Code to execute with specified bindings.} } \description{ \code{with_mocked_bindings()} and \code{local_mocked_bindings()} provide tools for "mocking", temporarily redefining a function so that it behaves differently during tests. This is helpful for testing functions that depend on external state (i.e. reading a value from a file or a website, or pretending a package is or isn't installed). Learn more in \code{vignette("mocking")}. } \section{Use}{ There are four places that the function you are trying to mock might come from: \itemize{ \item Internal to your package. \item Imported from an external package via the \code{NAMESPACE}. \item The base environment. \item Called from an external package with \code{::}. } They are described in turn below. 
(To mock S3 & S4 methods and R6 classes see \code{\link[=local_mocked_s3_method]{local_mocked_s3_method()}}, \code{\link[=local_mocked_s4_method]{local_mocked_s4_method()}}, and \code{\link[=local_mocked_r6_class]{local_mocked_r6_class()}}.) \subsection{Internal & imported functions}{ You mock internal and imported functions the same way. For example, take this code: \if{html}{\out{
    }}\preformatted{some_function <- function() \{ another_function() \} }\if{html}{\out{
    }} It doesn't matter whether \code{another_function()} is defined by your package or you've imported it from a dependency with \verb{@import} or \verb{@importFrom}, you mock it the same way: \if{html}{\out{
    }}\preformatted{local_mocked_bindings( another_function = function(...) "new_value" ) }\if{html}{\out{
    }} } \subsection{Base functions}{ To mock a function in the base package, you need to make sure that you have a binding for this function in your package. It's easiest to do this by binding the value to \code{NULL}. For example, if you wanted to mock \code{interactive()} in your package, you'd need to include this code somewhere in your package: \if{html}{\out{
    }}\preformatted{interactive <- NULL }\if{html}{\out{
    }} Why is this necessary? \code{with_mocked_bindings()} and \code{local_mocked_bindings()} work by temporarily modifying the bindings within your package's namespace. When these tests are running inside of \verb{R CMD check} the namespace is locked which means it's not possible to create new bindings so you need to make sure that the binding exists already. } \subsection{Namespaced calls}{ It's trickier to mock functions in other packages that you call with \code{::}. For example, take this minor variation: \if{html}{\out{
    }}\preformatted{some_function <- function() \{ anotherpackage::another_function() \} }\if{html}{\out{
    }} To mock this function, you'd need to modify \code{another_function()} inside the \code{anotherpackage} package. You \emph{can} do this by supplying the \code{.package} argument to \code{local_mocked_bindings()} but we don't recommend it because it will affect all calls to \code{anotherpackage::another_function()}, not just the calls originating in your package. Instead, it's safer to either import the function into your package, or make a wrapper that you can mock: \if{html}{\out{
    }}\preformatted{some_function <- function() \{ my_wrapper() \} my_wrapper <- function(...) \{ anotherpackage::another_function(...) \} local_mocked_bindings( my_wrapper = function(...) "new_value" ) }\if{html}{\out{
    }} } \subsection{Multiple return values / sequence of outputs}{ To mock a function that returns different values in sequence, for instance an API call whose status would be 502 then 200, or an user input to \code{readline()}, you can use \code{\link[=mock_output_sequence]{mock_output_sequence()}} \if{html}{\out{
    }}\preformatted{local_mocked_bindings(readline = mock_output_sequence("3", "This is a note", "n")) }\if{html}{\out{
    }} } } \seealso{ Other mocking: \code{\link{mock_output_sequence}()} } \concept{mocking} testthat/man/local_edition.Rd0000644000176200001440000000117115047715224015770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/edition.R \name{local_edition} \alias{local_edition} \alias{edition_get} \title{Temporarily change the active testthat edition} \usage{ local_edition(x, .env = parent.frame()) edition_get() } \arguments{ \item{x}{Edition Should be a single integer.} \item{.env}{Environment that controls scope of changes. For expert use only.} } \description{ \code{local_edition()} allows you to temporarily (within a single test or a single test file) change the active edition of testthat. \code{edition_get()} allows you to retrieve the currently active edition. } testthat/man/testthat_tolerance.Rd0000644000176200001440000000053115056134100017042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare.R \name{testthat_tolerance} \alias{testthat_tolerance} \title{Default numeric tolerance} \usage{ testthat_tolerance() } \description{ testthat's default numeric tolerance is 1.4901161 × 10\if{html}{\out{}}-8\if{html}{\out{}}. } \keyword{internal} testthat/man/ListReporter.Rd0000644000176200001440000000157215127561732015630 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-list.R \name{ListReporter} \alias{ListReporter} \title{Capture test results and metadata} \description{ This reporter gathers all results, adding additional information such as test elapsed time, and test filename if available. Very useful for reporting. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/run_cpp_tests.Rd0000644000176200001440000000117514164710002016043 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-compiled-code.R \name{expect_cpp_tests_pass} \alias{expect_cpp_tests_pass} \alias{run_cpp_tests} \title{Do C++ tests past?} \usage{ expect_cpp_tests_pass(package) run_cpp_tests(package) } \arguments{ \item{package}{The name of the package to test.} } \description{ Test compiled code in the package \code{package}. A call to this function will automatically be generated for you in \code{tests/testthat/test-cpp.R} after calling \code{\link[=use_catch]{use_catch()}}; you should not need to manually call this expectation yourself. } \keyword{internal} testthat/man/JunitReporter.Rd0000644000176200001440000000303515127561732016002 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-junit.R \name{JunitReporter} \alias{JunitReporter} \title{Report results in jUnit XML format} \description{ This reporter includes detailed results about each test and summaries, written to a file (or stdout) in jUnit XML format. This can be read by the Jenkins Continuous Integration System to report on a dashboard etc. Requires the \emph{xml2} package. To fit into the jUnit structure, \code{context()} becomes the \verb{} name as well as the base of the \verb{ classname}. 
The \code{test_that()} name becomes the rest of the \verb{ classname}. The deparsed \code{expect_that()} call becomes the \verb{} name. On failure, the message goes into the \verb{} node message argument (first line only) and into its text content (full message). Execution time and some other details are also recorded. References for the jUnit XML format: \url{https://github.com/testmoapp/junitxml} } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/is_testing.Rd0000644000176200001440000000220015040747537015332 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-env.R \name{is_testing} \alias{is_testing} \alias{is_parallel} \alias{is_checking} \alias{is_snapshot} \alias{testing_package} \title{Determine testing status} \usage{ is_testing() is_parallel() is_checking() is_snapshot() testing_package() } \description{ These functions help you determine if you code is running in a particular testing context: \itemize{ \item \code{is_testing()} is \code{TRUE} inside a test. \item \code{is_snapshot()} is \code{TRUE} inside a snapshot test \item \code{is_checking()} is \code{TRUE} inside of \verb{R CMD check} (i.e. by \code{\link[=test_check]{test_check()}}). \item \code{is_parallel()} is \code{TRUE} if the tests are run in parallel. \item \code{testing_package()} gives name of the package being tested. 
} A common use of these functions is to compute a default value for a \code{quiet} argument with \code{is_testing() && !is_snapshot()}. In this case, you'll want to avoid an run-time dependency on testthat, in which case you should just copy the implementation of these functions into a \code{utils.R} or similar. } testthat/man/figures/0000755000176200001440000000000015047715224014340 5ustar liggesuserstestthat/man/figures/lifecycle-questioning.svg0000644000176200001440000000171414164710002021352 0ustar liggesuserslifecyclelifecyclequestioningquestioning testthat/man/figures/lifecycle-stable.svg0000644000176200001440000000247215047715224020275 0ustar liggesusers lifecycle: stable lifecycle stable testthat/man/figures/lifecycle-experimental.svg0000644000176200001440000000245015047715224021514 0ustar liggesusers lifecycle: experimental lifecycle experimental testthat/man/figures/lifecycle-deprecated.svg0000644000176200001440000000244015047715224021116 0ustar liggesusers lifecycle: deprecated lifecycle deprecated testthat/man/figures/lifecycle-superseded.svg0000644000176200001440000000244015047715224021161 0ustar liggesusers lifecycle: superseded lifecycle superseded testthat/man/figures/logo.png0000644000176200001440000004760315040747537016025 0ustar liggesusersPNG  IHDRޫhgAMA a cHRMz&u0`:pQ<bKGDtIME!z _NbIDATxwսgfWw+w M 54)7ɽ)o{P$nwM3s?Fjڦ2GّfΣ~͈AJ|Rʓ;>|_g޹Gv y_+m7`?I&/pr.,޶<yߔm -aLpkz^xãgpm˵Ű[Ô4zs?3Y،X ?~;nrg3ű[ÌVWn)@4MňDZZomz;|c^}偣ae+ul( `5R`lO 3,~PnB Tu⪮/~0mLכ+@q86i>_պyKs9aW݅¤u=5C!խLJWq,WM뾹 x3+c[C4 `p=p'pzicMGN6l\ sĉw!\UNMl!2&<|(/B6 ïB'4CSW7;cXPw~ģ [Hx}' {tZܨ)5i܄+TeS=ܲysGY~ԣ[@pU`1phnNb5` }QĐy YUO [5!RlLXΡ1!B/@ymwku{':tSJZ㖐'2TWe]uq+x1~홏6l "Sh u"oA^2f8ov;_~ܔH#F]zs޺iscyL=WG3 LxXdݾ+x .To8bXM\(]Y1c-7f{,س\0҄Gaƚ}^O߽ 87r}ָs|jHqq_k֭ec꫊Vф-sꆏV'r>kwpd7i yhUq+7^Cb9ss/[Dܔ4&Œ7i>5qnݼf\N}!۶#Sˁ4:B?VSH& ҶV/<1vOtVPb|3eͼO7FfnW>o=tR9f1,⏥.aՒ!Hp'_LF:{&ч@1J :>nIE e>>vi[v1J]!->HN_^f0Hgh[y 둦bhݟG8fJ& 1X!N)큪Ys#C_cJDp5`9z)X~}mwB5HdvBА4",SQbr/}yUf.dxל"]>kfaRۺn'm 
(鳢kXf@/m1٪w;+*xǢ;J3kPboz>E{i{GDId[][{Ko_x4 ~S/;#Yl~J1Z[ݳmН*E-f>ܲyKfNRjgI6?3&%)x|D!i[5i-m1-Ϯ޾Nc̢E,RdZE 灺iDyzPh FM: 3>>zw}{ͼA=茪ڕ9,!7{ݳ>|`DLPA-^SG󆍍 s\.1jjY4%XȃΏΤia-Njʊ3ٲacj|].0&\A};o'ycvڭn)mQuv_um.5k8KZTFl.O7.sb^]K^*2\)mq_ҶsK;8‚. #erIiݘHӘp @fX7l[TϙK.O.(#fNO6Ώ<ްn(mys9[њys9c[51՝e8g47ģk@)[mHŷ>89,.uQfX 8Cl~_% zyZWJw&7wmZs˙2J]2C~\m~R׉:+o;eMd-FMbv-[Ihݼ%P=g6CƦK\یF YE[ڨv gj[t:9|S>ҺekjCz!Ys3l~it>$D1OjS8d[|_9lf|RCgtƲ]L6؇.[#=*+۴msaWb59C5tF{}wh`OPdl[fѲqず9kO\b^6S v|Y+eHӴBA-^Qy!j~pͼyx%-ojuwl~e9/'Vֵkё&?`[ n#z;ds̢E,ʒokwFwy V'$emwNOaZ@ ضEmqU]}U/$]p/֠qPkyx+Oaz]Hl72SOfXfc ,mq]?-[[gz8(Wɰ@ϑvJbm bOPAm/8ʮ?7ތU:|PEp_r7\gl}J֛S&/`[ $m[C,,OE+CQj~ƺ9ͻK V֗_)[mȶŝIbúv]~(2v;+\{;mI?hn'l JfXfcamo&ln=S_0%d䔤WoGx^;e͠ԭָ+&x~V;Ƃk.em~V')[7)[mA-媬xeL> Uw1zuMZ֘=ε) YmpNEšP=Ʀnw=&gOG;f@>f(D,z;eȶŭWnzz|,f}=~ǫ={izw=5[6n:E`DIPԩ=n N:sqV\%uvǭVBF씭6Ûl 4oTϞIwnWY{ojyexp#H0ʒ7&DJe+ *>emwN'0Z[@ IVۢ1y Î]-6SgXUzibJz8E}.(M!jjc)'Q7gƋ/q׽2;fdͶ썘 7i[ܵ'3p1|RJ<n}ΊiQ8`Zh *4KUbDY3KqVʱ5չ~q> "U`ݔLI ZpEeMl3zeK@zk|I,!'WigXt3Vx.i׭zTKK[6Cu]$.!(T> Zr'ߧޯ߉;h_>kD )%: *k klSj^t`*HaҞ_Qpr:) "}9 :u~B&(c%^c`!BGЄ~LiþSRJ4!E^(x'KI&Es9L) 2 _T8TSl3qo<:.dMƠRBCb|cb_-pɢ3v9j)1](sذPceBSkMb %^VK?IB0K ]Sڤ_0Ի9)fbR1 bHbe2'l8VY/l :$V$SU$,i$B5 jHHCm&d;Pp: 8T K VTJ !f#M}24Rk n; ̦8،s)!*], ': /)i5%qK޴;Ӧ7ΐ/ \XX) #i@zsHWIW1[LI[338Q(0Nb˛drZF5J /f<=so$!DC:G5CY0D*eʀ1$vӤ-. b.@HFHҹ/O>;z11(f 8U2TMt+g\? ,[Hg&Q`!Rp01"NFX%yC[0,n^M^J&뒶3V`1gÔ!ݠ!V)Vˬ 4 @"\n:5w>Ii5Ioz Q"kN<\Q Ꜣ K܃fB8C:]vHgwR\qj 83(6Ⅱ."pE0@[*u b&uI,nv ҙG!P~yݎI|p=>xa by<K81imth%3.q i#vHg'BU?I>wX]f#9R!(iK"F.)1 p9UiE֖$b BHhL8iY\B]d02g2 tH'[~DXLN qS!hX-s. rPB:c1IKFHߎ<uRipxa8 8AK cȡ*A . 
%!##HQ`ɐN-3kxd["b!K[]Z>"P]3[b֋8KtF0N;_Iouo{ΰ0٥6ƨ4EW~ $7 ./!TmmnM(bn7~%K #C0 55\S1{.S-1{.AúKXX$(!YYHg*Ҫ/H)z]*im.%`0"ƺ=1b ti-X@/} >ЬGj㠳/_Bh3Št~RJԊJg7ʲ]pt0 HH=\Jδ7ru{bd 8A5NDpO}B_YJN'>m%+IHpH었o.?*>IJyy֏)`xt&]~eMuXBČL&RqjXV;.v{BJPB:uK̃ٙϟHW6D)eehc_>EA>CeJHNV'&2nB=tvx+ J');_&ZtV7//4}SH!gMlM" !دבVS+v 7ɨpnBcu{]v$<!'0Ctfc = a9*ƹ1$%d:]Źf@"pd0B:&OPyWڷ.9@sXS #CԥF+VXIRtLʲZ۔{3(!!#aĐb&B߳]77eNtVo-1tfT XJv!\nDŽ(Uըu(똁6io*EPָzF)|/RjF1MKv1qB֏(QN v0P4)5$[b6IhWbޢ ,AAFetNL -` ژ1G0SuML kVzMH]M-׋!!e#v|D  G%+;-ܾ7)\~ K-?aPkP.&z8LG:hgGZZib&/5Ntگ%)-M?\,1U+~|CnFx<*=>i|+{_zNLEI)Qt.MFISp#ʹ~[XeRt}!uB U ɗuaS],hP].]LYLi3DC:8JS &ºWc6b X|)}U*>Y< ?^gon3DM B=~9?)i Ro|ϟDm(DL :Ҳt)ĭ zT*U' Iњt+t*AͨPݰ1Y,1B/LlvuJN&VP_?.PmDlB߻3FxƍyafuQar㵸KUv &tEE 0iF, TRUR-n6APaT K1)QUԁFM%3qϛC/~UR 2mAͷ18̥⢋f-4~ں_N BP倸҉I-srF;. S\nJk LiP9mjfG-H?nl $ɴFS^&T~T}k(^Os~?U_:o7@(A^qBHڐglT"-3BX޲ъ-A"ْU\iihw;ۺ%b[}|na>O>ݓy%N?k B=w:ia+hoE<ჁhQv)*4 wC|7^ mۻr.[ 5;FoO` PqzoOR/TWE-4,:ǔ?IL "0[FP+J|-;d\3g]ocGk vH)g }O~^4 6fC;&M@75mf+lljcoރ(huu(oiUf30\lD5{vCAdH]/HcuII4΀)%CշcnFm+ ZЈŬYfǴ)x-{r\@x=m -b#% Əv u awlkCF!`4-3h0qy˲.k4ָ:'UX Ν^^M-QvɨV}4l)qL=QQ)&j]m"BVW!UwYKoj$m{g夠(H)14v]M9-A@0>kإ/ҋ t:+>ys7Dy/!dةoo?n6=c $vRkjJ}';R볾7Cd,6XRJsP7NQ³xm$~5񤨖1( ]D7nB_|'6y#{BE0֨Tab x JJux}:BQ!=#ǭBQ<)3k7Zx<Y !qko$jMUI^{qU}猃S-yNMBg_*2Š[6԰k2p9YE!87\B9v.ɌP}h/rEqq/OLOJ B`qwGqU֊mlgZȾpqB74ݴ9aQGqprMJ[:cx%|#_"n=]{6ysBKUMeBrl 'eue<(MuĚZYf@,NU%UAxb[բӯH,cqLLmmN HB;"R ׽F_Oyڤ냸cҴ^)%cTeI]g2+Zr=|K1yn;S7ZZ =b(wbH{r| PЋ/߽g@"N]0^7ر8&G&t ŏL}Sv )ӻkP*+PeD^rJj%L󕑝 Pp}dhס3zCC2; !8!i%f 7,:Kʐ V}Ll6bۺ﷞ WΆp8&MrXXEILji8&O=o614aY3>g/eKQS5g-O-Zir8e+e}nB|9ƎA9gc4;v۴%'%Ht{@2E-+ûli֊(IxO:'ٔw»t1_&.5[(f]Ǝ{Ν`gr_jU%cY+[rI!'Tw="*Zi(~O> p>${/q"p\êR|YbK 雍jM 9~9jy9eK@7&SӉ/1q"GS 7bDֽww3U4׼R9>rB/w߈ ؎( 4{Uj!B #Qtje%e8#$TyhmCߵR>ZpTU&աVUvjr͝p:  UAbwޣ8g Ell^ ?OjD"sccnm 3sz@VVVV}ouL*Ƃl EloX֭6nLg3ve4*7aٓF[գh)_?[&N1m*?؁ e?t !*{hAqLZ[p: vVww.@O( f(L_NuT|"'V;Fk+l(R}%#O-~"20 }7ԫ_QZRyw=/\,=i( a4 ="IuĶm Gz 6/%ژ1]W߿߈  8l᥮y]"Gfɓp<ǔ)hcQ*SceP-؈g/]쵬~H>{n65Y;eGw\$rd +%d)11o z@t*)뤷}`?F2GUW2%^3Ogdit6bg0.RJ+Ⱦq)HlKHɿu{eRMnOs K"*;> 
(55?y&UNyjàG(4[Ep10M\sPo@3DxZo݊ d1mN Fjk,=Ϸ F8}7a ?%u4x"u+Pkjk:Ŀh&b(lY><ʁ_{wx[([w#:zo Kɦ0e֢XY "'~_cd3gb p׽tIG&Ũ~RJCs"<`>YGE!SHFbyzE&1_#p͙cDzOfH2('wH0&7LﲥWw#mY vBcme+QM000ftBaR%ɝs;ebOH95m8q}{ҩvyO4IX2cVGQzz4χ/CqWf{{g`I3^OFX$T bV?߅4޷'5D^! L:BV Aӻ&\Mqu]9) cD<YtTRSp߹țo^歖]~4jeKx5֞H=H9vc3]$Y@X z?fG{M";Q^tT^/; UE|}ٲ %'Wp|P?VWS~xCu7Ҷnd4?;`bL'.\jN;xE(  \}ߔ.0 pDWQqE(wNƎ'?D6O-EoR TeT^y&&kOvLOa(]?WCd\$DSKsok_ =:?GMfGzSzCzSNXAZY}bJqo:hk7ex_v@Q~g]+ؿ؎15 qmSzk{]MCyt#x2&?BO[$~PT+aPvƩTrMtL6/4W8=WVUv@;HFxa x!$p\{ӏ%:s| Gs=;bKWU"oCv4Z+e.--3P .@ôyF{{h[4>Wx/!}>:FK4M⦅;s~* mܜppΜ1xcd2F_٦..go8L<چȾ6n,1*Kmw}CmHڊkuėb, 椴zW**I5NXhlZ[}~k=Asp$^/jejuju5Je χ>K]ՊTK?,N[tR^={G)B?5wU6<)812[:ȴ$Ι3|"p2@wk 6AϹyL*>N?mR̦HD]_pU_5s='JZ"l [i(~j~]?eƁ&;wcݻ1`pi[_=^6< jnt&yg ;MGU_pn67[o[i.̸iXj* FGC&ܓ; Tu@ lEHi%} <W^S )j2Al{\;Dk?^N2B*/_kVM# 0[Zv4'Xga=Q-`VF7l6΂w1T2Ԛ*kQ]OHTTz֡7e?qSsw.?iBȩWbmihR˩=i =#!Л}#a˾{H~wBUP@=EI;&MvX8T~G, j =HDzƏE ;:J]>B`w#fG8Lf̟]Sv##Qk1a%Y8?45-hhD YdpWXu1ic?B/Lw138L&j >P# [BE穼oQUB[.rYxvg8!ԪJgٟi>^'{ƟF"rt)7{n^vf+`G2z BQmpwsV"E!a# ?Ms>!?mݎamEn~mvJw3lp@rϡϳLwz\jDxPdN u=W^*Hx{ïƾoc03 ~-~S]55r<[ \szK|Y\^>wLϢBe2'c(;T|'uӴ6n@߻oz$M x/^Ķm'ks4M*:#w|fi QG;TbfVEN:%SaF)J0"]C ' Btj7$1hF0r/]IVU0l d=%C R6v$ISDw$-)%H:Ԡ0hh#2)\o@VR_4a'zi<.%1hŤ:N&~mJLRKLI$&р=B$] B@ R3tᚩqIC Uϯz0$,%a$Nșm3$ǹHvJD 7}S@ 0HLfIi Ip#f^#RW$Xݰ;`UĀ'ˁI_?p.=n<џw Pb fԐ^u)id4 1M6g~ :[&_ l:V@yqBی`)%mqn{\)]|Ι!ywO.T&T!D0V(bf&5k&5j!q%V+SIz \ɩ&f؄d4#\!P{gL[Tz(^f2<*E`\.OꊴL[6)%@j;5dsk_~q3vRW}c@ZIš>W#+Tڱ%a Ims+T5\O8W|_sYqM>WNI8k|g) -EOͯF qTW>ߕӧ=ֲysj,N_VA% btm$=Ȅk52-xCq8>P=kB`ae&O6}Ҥ59%GM .Ί]杇]%K8/JO,Zv:+Ls@$#m|[c~ik&0].󎲲+joĪo)zyöI۶E(ͯKӹ^zwOmUr z=VO *0iJ2MbܽwWuMW'1g1jJV \@*21LS1 aSWn-Z"YȒ?_Yrʰ-Hmq;'ڶE(oVVvE=λs8KZ!Sӄ\e[0;su-ZaxSD3vʖj+ 4\JNΘU۶8z(ͯAxnsUU]}KBWK]Cm6ǝ+g;l5F£Yzե.n7tͰ-_m%umqSD+?ֲisj9!_o?| *۶8n3 b1}_xּa 8r..u{e,o'ObEb|l5'm~_Yy[e/]Ͱ-խ>۶8b)O.I'Wϙö %E)}7z۶8d( wUٔn^5{Z9 ,C@;p09sS0Ei^5oxg'ERۦ-0#ö5>k/>IR·~U+^fݑGK]D` x&dV:H)Ue.sMXyW4[?SE[ØjP(waL 6IbsmfbUzR&GlBnG-oo^$ @jr(/nkk~R&xGu< lifecyclelifecyclearchivedarchived 
testthat/man/figures/lifecycle-defunct.svg0000644000176200001440000000170414164710002020434 0ustar liggesuserslifecyclelifecycledefunctdefunct testthat/man/figures/lifecycle-soft-deprecated.svg0000644000176200001440000000172614164710002022061 0ustar liggesuserslifecyclelifecyclesoft-deprecatedsoft-deprecated testthat/man/figures/lifecycle-maturing.svg0000644000176200001440000000170614164710002020634 0ustar liggesuserslifecyclelifecyclematuringmaturing testthat/man/expect_output.Rd0000644000176200001440000000511515054053615016072 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-output.R \name{expect_output} \alias{expect_output} \title{Do you expect printed output to match this pattern?} \usage{ expect_output( object, regexp = NULL, ..., info = NULL, label = NULL, width = 80 ) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against. \itemize{ \item A character vector giving a regular expression that must match the output. \item If \code{NULL}, the default, asserts that there should output, but doesn't check for a specific value. \item If \code{NA}, asserts that there should be no output. }} \item{...}{ Arguments passed on to \code{\link[=expect_match]{expect_match}} \describe{ \item{\code{all}}{Should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE).} \item{\code{fixed}}{If \code{TRUE}, treats \code{regexp} as a string to be matched exactly (not a regular expressions). Overrides \code{perl}.} \item{\code{perl}}{logical. Should Perl-compatible regexps be used?} }} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. 
Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{width}{Number of characters per line of output. This does not inherit from \code{getOption("width")} so that tests always use the same output width, minimising spurious differences.} } \value{ The first argument, invisibly. } \description{ Test for output produced by \code{print()} or \code{cat()}. This is best used for very simple output; for more complex cases use \code{\link[=expect_snapshot]{expect_snapshot()}}. } \examples{ str(mtcars) expect_output(str(mtcars), "32 obs") expect_output(str(mtcars), "11 variables") # You can use the arguments of grepl to control the matching expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE) expect_output(str(mtcars), "$ mpg", fixed = TRUE) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/quasi_label.Rd0000644000176200001440000000331415047715224015445 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/quasi-label.R \name{quasi_label} \alias{quasi_label} \title{Quasi-labelling} \usage{ quasi_label(quo, label = NULL, arg = NULL) } \arguments{ \item{quo}{A quosure created by \code{rlang::enquo()}.} \item{label}{An optional label to override the default. This is only provided for internal usage. 
Modern expectations should not include a \code{label} parameter.} \item{arg}{Argument name shown in error message if \code{quo} is missing.} } \value{ A list containing two elements: \item{val}{The evaluate value of \code{quo}} \item{lab}{The quasiquoted label generated from \code{quo}} } \description{ The first argument to every \code{expect_} function can use unquoting to construct better labels. This makes it easy to create informative labels when expectations are used inside a function or a for loop. \code{quasi_label()} wraps up the details, returning the expression and label. } \section{Limitations}{ Because all \code{expect_} function use unquoting to generate more informative labels, you can not use unquoting for other purposes. Instead, you'll need to perform all other unquoting outside of the expectation and only test the results. } \examples{ f <- function(i) if (i > 3) i * 9 else i * 10 i <- 10 # This sort of expression commonly occurs inside a for loop or function # And the failure isn't helpful because you can't see the value of i # that caused the problem: show_failure(expect_equal(f(i), i * 10)) # To overcome this issue, testthat allows you to unquote expressions using # !!. This causes the failure message to show the value rather than the # variable name show_failure(expect_equal(f(!!i), !!(i * 10))) } \keyword{internal} testthat/man/expectation.Rd0000644000176200001440000000314015104404205015470 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectation.R \name{expectation} \alias{expectation} \alias{new_expectation} \alias{exp_signal} \alias{is.expectation} \title{Expectation conditions} \usage{ expectation(type, message, ..., srcref = NULL, trace = NULL) new_expectation( type, message, ..., srcref = NULL, trace = NULL, .subclass = NULL ) exp_signal(exp) is.expectation(x) } \arguments{ \item{type}{Expectation type. 
Must be one of "success", "failure", "error", "skip", "warning".} \item{message}{Message describing test failure} \item{...}{Additional attributes for the expectation object.} \item{srcref}{Optional \code{srcref} giving location of test.} \item{trace}{An optional backtrace created by \code{\link[rlang:trace_back]{rlang::trace_back()}}. When supplied, the expectation is displayed with the backtrace. Expert use only.} \item{.subclass}{An optional subclass for the expectation object.} \item{exp}{An expectation object, as created by \code{\link[=new_expectation]{new_expectation()}}.} \item{x}{object to test for class membership} } \description{ \code{new_expectation()} creates an expectation condition object and \code{exp_signal()} signals it. \code{expectation()} does both. \code{is.expectation()} tests if a captured condition is a testthat expectation. These functions are primarily for internal use. If you are creating your own expectation, you do not need these functions and instead should use \code{\link[=pass]{pass()}} or \code{\link[=fail]{fail()}}. See \code{vignette("custom-expectation")} for more details. } \keyword{internal} testthat/man/testthat_examples.Rd0000644000176200001440000000106115047715224016717 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/examples.R \name{testthat_examples} \alias{testthat_examples} \alias{testthat_example} \title{Retrieve paths to built-in example test files} \usage{ testthat_examples() testthat_example(filename) } \arguments{ \item{filename}{Name of test file} } \description{ \code{testthat_examples()} retrieves path to directory of test files, \code{testthat_example()} retrieves path to a single test file. 
} \examples{ dir(testthat_examples()) testthat_example("success") } \keyword{internal} testthat/man/skip_on_travis.Rd0000644000176200001440000000110115040747537016213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/skip.R \name{skip_on_travis} \alias{skip_on_travis} \alias{skip_on_appveyor} \title{Superseded skip functions} \usage{ skip_on_travis() skip_on_appveyor() } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} \itemize{ \item \code{skip_on_travis()} and \code{skip_on_appveyor()} have been superseded by \code{\link[=skip_on_ci]{skip_on_ci()}}. } } \keyword{internal} testthat/man/testthat_results.Rd0000644000176200001440000000111413171137773016604 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-list.R \name{testthat_results} \alias{testthat_results} \title{Create a \code{testthat_results} object from the test results as stored in the ListReporter results field.} \usage{ testthat_results(results) } \arguments{ \item{results}{a list as stored in ListReporter} } \value{ its list argument as a \code{testthat_results} object } \description{ Create a \code{testthat_results} object from the test results as stored in the ListReporter results field. 
} \seealso{ ListReporter } \keyword{internal} testthat/man/local_mocked_s3_method.Rd0000644000176200001440000000215715127561732017553 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mock-oo.R \name{local_mocked_s3_method} \alias{local_mocked_s3_method} \alias{local_mocked_s4_method} \title{Mock S3 and S4 methods} \usage{ local_mocked_s3_method(generic, signature, definition, frame = caller_env()) local_mocked_s4_method(generic, signature, definition, frame = caller_env()) } \arguments{ \item{generic}{A string giving the name of the generic.} \item{signature}{A character vector giving the signature of the method.} \item{definition}{A function providing the method definition, or \code{NULL} to temporarily remove the method.} \item{frame}{Calling frame which determines the scope of the mock. Only needed when wrapping in another local helper.} } \description{ These functions temporarily override S3 or S4 methods. They can mock methods that don't already exist, or temporarily remove a method by setting \code{definition = NULL}. Learn more about mocking in \code{vignette("mocking")}. } \examples{ x <- as.POSIXlt(Sys.time()) local({ local_mocked_s3_method("length", "POSIXlt", function(x) 42) length(x) }) length(x) } testthat/man/expect_output_file.Rd0000644000176200001440000000152215047715224017072 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-known.R \name{expect_output_file} \alias{expect_output_file} \title{Do you expect the output/result to equal a known good value?} \usage{ expect_output_file( object, file, update = TRUE, ..., info = NULL, label = NULL, print = FALSE, width = 80 ) } \description{ \code{expect_output_file()} behaves identically to \code{\link[=expect_known_output]{expect_known_output()}}. 
} \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_output_file()} is deprecated in the 3rd edition; please use \code{\link[=expect_snapshot_output]{expect_snapshot_output()}} and friends instead. } \keyword{internal} testthat/man/inheritance-expectations.Rd0000644000176200001440000000754515054053615020170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-inheritance.R \name{inheritance-expectations} \alias{inheritance-expectations} \alias{expect_type} \alias{expect_s3_class} \alias{expect_s4_class} \alias{expect_r6_class} \alias{expect_s7_class} \title{Do you expect an S3/S4/R6/S7 object that inherits from this class?} \usage{ expect_type(object, type) expect_s3_class(object, class, exact = FALSE) expect_s4_class(object, class) expect_r6_class(object, class) expect_s7_class(object, class) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{type}{String giving base type (as returned by \code{\link[=typeof]{typeof()}}).} \item{class}{The required type varies depending on the function: \itemize{ \item \code{expect_type()}: a string. \item \code{expect_s3_class()}: a string or character vector. The behaviour of multiple values (i.e. a character vector) is controlled by the \code{exact} argument. \item \code{expect_s4_class()}: a string. \item \code{expect_r6_class()}: a string. \item \code{expect_s7_class()}: an \code{\link[S7:S7_class]{S7::S7_class()}} object. 
} For historical reasons, \code{expect_s3_class()} and \code{expect_s4_class()} also take \code{NA} to assert that the \code{object} is not an S3 or S4 object.} \item{exact}{If \code{FALSE}, the default, checks that \code{object} inherits from any element of \code{class}. If \code{TRUE}, checks that object has a class that exactly matches \code{class}.} } \description{ See \url{https://adv-r.hadley.nz/oo.html} for an overview of R's OO systems, and the vocabulary used here. \itemize{ \item \code{expect_type(x, type)} checks that \code{typeof(x)} is \code{type}. \item \code{expect_s3_class(x, class)} checks that \code{x} is an S3 object that \code{\link[=inherits]{inherits()}} from \code{class} \item \code{expect_s3_class(x, NA)} checks that \code{x} isn't an S3 object. \item \code{expect_s4_class(x, class)} checks that \code{x} is an S4 object that \code{\link[=is]{is()}} \code{class}. \item \code{expect_s4_class(x, NA)} checks that \code{x} isn't an S4 object. \item \code{expect_r6_class(x, class)} checks that \code{x} an R6 object that inherits from \code{class}. \item \code{expect_s7_class(x, Class)} checks that \code{x} is an S7 object that \code{\link[S7:S7_inherits]{S7::S7_inherits()}} from \code{Class} } See \code{\link[=expect_vector]{expect_vector()}} for testing properties of objects created by vctrs. 
} \examples{ x <- data.frame(x = 1:10, y = "x", stringsAsFactors = TRUE) # A data frame is an S3 object with class data.frame expect_s3_class(x, "data.frame") show_failure(expect_s4_class(x, "data.frame")) # A data frame is built from a list: expect_type(x, "list") f <- factor(c("a", "b", "c")) o <- ordered(f) # Using multiple class names tests if the object inherits from any of them expect_s3_class(f, c("ordered", "factor")) # Use exact = TRUE to test for exact match show_failure(expect_s3_class(f, c("ordered", "factor"), exact = TRUE)) expect_s3_class(o, c("ordered", "factor"), exact = TRUE) # An integer vector is an atomic vector of type "integer" expect_type(x$x, "integer") # It is not an S3 object show_failure(expect_s3_class(x$x, "integer")) # Above, we requested data.frame() converts strings to factors: show_failure(expect_type(x$y, "character")) expect_s3_class(x$y, "factor") expect_type(x$y, "integer") } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_snapshot_output.Rd0000644000176200001440000000465615054412736020025 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot.R \name{expect_snapshot_output} \alias{expect_snapshot_output} \alias{expect_snapshot_error} \alias{expect_snapshot_warning} \title{Snapshot helpers} \usage{ expect_snapshot_output(x, cran = FALSE, variant = NULL) expect_snapshot_error(x, class = "error", cran = FALSE, variant = NULL) expect_snapshot_warning(x, class = "warning", cran = FALSE, variant = NULL) } \arguments{ \item{x}{Code to evaluate.} \item{cran}{Should these expectations be verified on CRAN? 
By default, they are not, because snapshot tests tend to be fragile because they often rely on minor details of dependencies.} \item{variant}{If non-\code{NULL}, results will be saved in \verb{_snaps/\{variant\}/\{test.md\}}, so \code{variant} must be a single string suitable for use as a directory name. You can use variants to deal with cases where the snapshot output varies and you want to capture and test the variations. Common use cases include variations for operating system, R version, or version of key dependency. Variants are an advanced feature. When you use them, you'll need to carefully think about your testing strategy to ensure that all important variants are covered by automated tests, and ensure that you have a way to get snapshot changes out of your CI system and back into the repo. Note that there's no way to declare all possible variants up front which means that as soon as you start using variants, you are responsible for deleting snapshot variants that are no longer used. (testthat will still delete all variants if you delete the test.)} \item{class}{Class of expected error or warning. The expectation will always fail (even on CRAN) if an error of this class isn't seen when executing \code{x}.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#questioning}{\figure{lifecycle-questioning.svg}{options: alt='[Questioning]'}}}{\strong{[Questioning]}} These snapshotting functions are questioning because they were developed before \code{\link[=expect_snapshot]{expect_snapshot()}} and we're not sure that they still have a role to play. \itemize{ \item \code{expect_snapshot_output()} captures just output printed to the console. \item \code{expect_snapshot_error()} captures an error message and optionally checks its class. \item \code{expect_snapshot_warning()} captures a warning message and optionally checks its class. 
} } \keyword{internal} testthat/man/default_reporter.Rd0000644000176200001440000000312715127561732016536 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter.R \name{default_reporter} \alias{default_reporter} \alias{default_compact_reporter} \alias{check_reporter} \title{Determine default reporters} \usage{ default_reporter(parallel = FALSE) default_compact_reporter() check_reporter() } \arguments{ \item{parallel}{If \code{TRUE}, return a reporter suitable for parallel testing.} } \description{ These three functions are used to determine the default reporters used for \code{test_dir()}, \code{test_file()}, and \code{test_package()}: \itemize{ \item \code{default_reporter()} returns the default reporter for \code{\link[=test_dir]{test_dir()}}. If \code{parallel} is \code{TRUE}, it uses \link{ParallelProgressReporter}, which you can override with option \code{testthat.default_parallel_reporter}. If \code{parallel} is \code{FALSE}, it uses \link{ProgressReporter}, which you can override with option \code{testthat.default_reporter}. \item \code{default_compact_reporter()} returns the default reporter for \code{\link[=test_file]{test_file()}}. It defaults to \link{CompactProgressReporter}, which you can override with the \code{testthat.default_compact_reporter} option. \item \code{check_reporter()} returns the default reporter for \code{\link[=test_package]{test_package()}}. It defaults to \link{CheckReporter}, which you can override with the \code{testthat.default_check_reporter} option. } Both \code{default_reporter()} and \code{default_compact_reporter()} will use \link{LlmReporter} if it appears that the tests are being run by a coding agent. 
} \keyword{internal} testthat/man/dir_state.Rd0000644000176200001440000000073713051613152015136 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/watcher.R \name{dir_state} \alias{dir_state} \title{Capture the state of a directory.} \usage{ dir_state(path, pattern = NULL, hash = TRUE) } \arguments{ \item{path}{path to directory} \item{pattern}{regular expression with which to filter files} \item{hash}{use hash (slow but accurate) or time stamp (fast but less accurate)} } \description{ Capture the state of a directory. } \keyword{internal} testthat/man/logical-expectations.Rd0000644000176200001440000000344115072252215017275 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-constant.R \name{logical-expectations} \alias{logical-expectations} \alias{expect_true} \alias{expect_false} \title{Do you expect \code{TRUE} or \code{FALSE}?} \usage{ expect_true(object, info = NULL, label = NULL) expect_false(object, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ These are fall-back expectations that you can use when none of the other more specific expectations apply. The disadvantage is that you may get a less informative error message. Attributes are ignored. 
} \examples{ expect_true(2 == 2) # Failed expectations will throw an error show_failure(expect_true(2 != 2)) # where possible, use more specific expectations, to get more informative # error messages a <- 1:4 show_failure(expect_true(length(a) == 3)) show_failure(expect_equal(length(a), 3)) x <- c(TRUE, TRUE, FALSE, TRUE) show_failure(expect_true(all(x))) show_failure(expect_all_true(x)) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}} } \concept{expectations} testthat/man/use_catch.Rd0000644000176200001440000001326415072252215015121 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-compiled-code.R \name{use_catch} \alias{use_catch} \title{Use Catch for C++ unit testing} \usage{ use_catch(dir = getwd()) } \arguments{ \item{dir}{The directory containing an \R package.} } \description{ Add the necessary infrastructure to enable C++ unit testing in \R packages with \href{https://github.com/catchorg/Catch2}{Catch} and \code{testthat}. 
} \details{ Calling \code{use_catch()} will: \enumerate{ \item Create a file \code{src/test-runner.cpp}, which ensures that the \code{testthat} package will understand how to run your package's unit tests, \item Create an example test file \code{src/test-example.cpp}, which showcases how you might use Catch to write a unit test, \item Add a test file \code{tests/testthat/test-cpp.R}, which ensures that \code{testthat} will run your compiled tests during invocations of \code{devtools::test()} or \verb{R CMD check}, and \item Create a file \code{R/catch-routine-registration.R}, which ensures that \R will automatically register this routine when \code{tools::package_native_routine_registration_skeleton()} is invoked. } You will also need to: \itemize{ \item Add xml2 to Suggests, with e.g. \code{usethis::use_package("xml2", "Suggests")} \item Add testthat to LinkingTo, with e.g. \code{usethis::use_package("testthat", "LinkingTo")} } C++ unit tests can be added to C++ source files within the \code{src} directory of your package, with a format similar to \R code tested with \code{testthat}. Here's a simple example of a unit test written with \code{testthat} + Catch: \preformatted{ context("C++ Unit Test") { test_that("two plus two is four") { int result = 2 + 2; expect_true(result == 4); } } } When your package is compiled, unit tests alongside a harness for running these tests will be compiled into your \R package, with the C entry point \code{run_testthat_tests()}. \code{testthat} will use that entry point to run your unit tests when detected. } \section{Functions}{ All of the functions provided by Catch are available with the \code{CATCH_} prefix -- see \href{https://github.com/catchorg/Catch2/blob/master/docs/assertions.md}{here} for a full list. 
\code{testthat} provides the following wrappers, to conform with \code{testthat}'s \R interface: \tabular{lll}{ \strong{Function} \tab \strong{Catch} \tab \strong{Description} \cr \code{context} \tab \code{CATCH_TEST_CASE} \tab The context of a set of tests. \cr \code{test_that} \tab \code{CATCH_SECTION} \tab A test section. \cr \code{expect_true} \tab \code{CATCH_CHECK} \tab Test that an expression evaluates to \code{TRUE}. \cr \code{expect_false} \tab \code{CATCH_CHECK_FALSE} \tab Test that an expression evaluates to \code{FALSE}. \cr \code{expect_error} \tab \code{CATCH_CHECK_THROWS} \tab Test that evaluation of an expression throws an exception. \cr \code{expect_error_as} \tab \code{CATCH_CHECK_THROWS_AS} \tab Test that evaluation of an expression throws an exception of a specific class. \cr } In general, you should prefer using the \code{testthat} wrappers, as \code{testthat} also does some work to ensure that any unit tests within will not be compiled or run when using the Solaris Studio compilers (as these are currently unsupported by Catch). This should make it easier to submit packages to CRAN that use Catch. } \section{Symbol Registration}{ If you've opted to disable dynamic symbol lookup in your package, then you'll need to explicitly export a symbol in your package that \code{testthat} can use to run your unit tests. \code{testthat} will look for a routine with one of the names: \preformatted{ C_run_testthat_tests c_run_testthat_tests run_testthat_tests } Assuming you have \verb{useDynLib(, .registration = TRUE)} in your package's \code{NAMESPACE} file, this implies having routine registration code of the form: \if{html}{\out{
    }}\preformatted{// The definition for this function comes from the file 'src/test-runner.cpp', // which is generated via `testthat::use_catch()`. extern SEXP run_testthat_tests(); static const R_CallMethodDef callMethods[] = \{ // other .Call method definitions, \{"run_testthat_tests", (DL_FUNC) &run_testthat_tests, 0\}, \{NULL, NULL, 0\} \}; void R_init_(DllInfo* dllInfo) \{ R_registerRoutines(dllInfo, NULL, callMethods, NULL, NULL); R_useDynamicSymbols(dllInfo, FALSE); \} }\if{html}{\out{
    }} replacing \verb{} above with the name of your package, as appropriate. See \href{https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Controlling-visibility}{Controlling Visibility} and \href{https://cran.r-project.org/doc/manuals/r-release/R-exts.html#Registering-symbols}{Registering Symbols} in the \strong{Writing R Extensions} manual for more information. } \section{Advanced Usage}{ If you'd like to write your own Catch test runner, you can instead use the \code{testthat::catchSession()} object in a file with the form: \preformatted{ #define TESTTHAT_TEST_RUNNER #include void run() { Catch::Session& session = testthat::catchSession(); // interact with the session object as desired } } This can be useful if you'd like to run your unit tests with custom arguments passed to the Catch session. } \section{Standalone Usage}{ If you'd like to use the C++ unit testing facilities provided by Catch, but would prefer not to use the regular \code{testthat} \R testing infrastructure, you can manually run the unit tests by inserting a call to: \preformatted{ .Call("run_testthat_tests", PACKAGE = ) } as necessary within your unit test suite. } \seealso{ \href{https://github.com/catchorg/Catch2/blob/master/docs/assertions.md}{Catch}, the library used to enable C++ unit testing. } testthat/man/SilentReporter.Rd0000644000176200001440000000166715127561732016160 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-silent.R \name{SilentReporter} \alias{SilentReporter} \title{Silently collect and all expectations} \description{ This reporter quietly runs all tests, simply gathering all expectations. This is helpful for programmatically inspecting errors after a test run. You can retrieve the results with \verb{$expectations()}. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/test_env.Rd0000644000176200001440000000101214164710002014770 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-env.R \name{test_env} \alias{test_env} \title{Generate default testing environment.} \usage{ test_env(package = NULL) } \description{ We use a new environment which inherits from \code{\link[=globalenv]{globalenv()}} or a package namespace. In an ideal world, we'd avoid putting the global environment on the search path for tests, but it's not currently possible without losing the ability to load packages in tests. } \keyword{internal} testthat/man/expect.Rd0000644000176200001440000000337315104404205014445 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expectation.R \name{expect} \alias{expect} \title{The previous building block of all \code{expect_} functions} \usage{ expect( ok, failure_message, info = NULL, srcref = NULL, trace = NULL, trace_env = caller_env() ) } \arguments{ \item{ok}{\code{TRUE} or \code{FALSE} indicating if the expectation was successful.} \item{failure_message}{A character vector describing the failure. The first element should describe the expected value, and the second (and optionally subsequence) elements should describe what was actually seen.} \item{info}{Character vector continuing additional information. 
Included for backward compatibility only and new expectations should not use it.} \item{srcref}{Location of the failure. Should only needed to be explicitly supplied when you need to forward a srcref captured elsewhere.} \item{trace}{An optional backtrace created by \code{\link[rlang:trace_back]{rlang::trace_back()}}. When supplied, the expectation is displayed with the backtrace. Expert use only.} \item{trace_env}{If \code{trace} is not specified, this is used to generate an informative traceback for failures. You should only need to set this if you're calling \code{fail()} from a helper function; see \code{vignette("custom-expectation")} for details.} } \value{ An expectation object from either \code{succeed()} or \code{fail()}. with a \code{muffle_expectation} restart. } \description{ Previously, we recommended using \code{expect()} when writing your own expectations. Now we instead recommend \code{\link[=pass]{pass()}} and \code{\link[=fail]{fail()}}. See \code{vignette("custom-expectation")} for details. } \seealso{ \code{\link[=exp_signal]{exp_signal()}} } \keyword{internal} testthat/man/test_package.Rd0000644000176200001440000000461315054053615015616 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-package.R \name{test_package} \alias{test_package} \alias{test_check} \alias{test_local} \title{Run all tests in a package} \usage{ test_package(package, reporter = check_reporter(), ...) test_check(package, reporter = check_reporter(), ...) test_local( path = ".", reporter = NULL, ..., load_package = "source", shuffle = FALSE ) } \arguments{ \item{package}{If these tests belong to a package, the name of the package.} \item{reporter}{Reporter to use to summarise output. Can be supplied as a string (e.g. "summary") or as an R6 object (e.g. \code{SummaryReporter$new()}). 
See \link{Reporter} for more details and a list of built-in reporters.} \item{...}{Additional arguments passed to \code{\link[=test_dir]{test_dir()}}} \item{path}{Path to directory containing tests.} \item{load_package}{Strategy to use for load package code: \itemize{ \item "none", the default, doesn't load the package. \item "installed", uses \code{\link[=library]{library()}} to load an installed package. \item "source", uses \code{\link[pkgload:load_all]{pkgload::load_all()}} to a source package. To configure the arguments passed to \code{load_all()}, add this field in your DESCRIPTION file: \if{html}{\out{
    }}\preformatted{Config/testthat/load-all: list(export_all = FALSE, helpers = FALSE) }\if{html}{\out{
    }} }} \item{shuffle}{If \code{TRUE}, randomly reorder the top-level expressions in the file.} } \value{ A list (invisibly) containing data about the test results. } \description{ \itemize{ \item \code{test_local()} tests a local source package. \item \code{test_package()} tests an installed package. \item \code{test_check()} checks a package during \verb{R CMD check}. } See \code{vignette("special-files")} to learn about the various files that testthat works with. } \section{\verb{R CMD check}}{ To run testthat automatically from \verb{R CMD check}, make sure you have a \code{tests/testthat.R} that contains: \if{html}{\out{
    }}\preformatted{library(testthat) library(yourpackage) test_check("yourpackage") }\if{html}{\out{
    }} } \section{Environments}{ Each test is run in a clean environment to keep tests as isolated as possible. For package tests, that environment inherits from the package's namespace environment, so that tests can access internal functions and objects. } testthat/man/expect_named.Rd0000644000176200001440000000366415054053615015625 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-named.R \name{expect_named} \alias{expect_named} \title{Do you expect a vector with (these) names?} \usage{ expect_named( object, expected, ignore.order = FALSE, ignore.case = FALSE, info = NULL, label = NULL ) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{expected}{Character vector of expected names. Leave missing to match any names. Use \code{NULL} to check for absence of names.} \item{ignore.order}{If \code{TRUE}, sorts names before comparing to ignore the effect of order.} \item{ignore.case}{If \code{TRUE}, lowercases all names to ignore the effect of case.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ You can either check for the presence of names (leaving \code{expected} blank), specific names (by supplying a vector of names), or absence of names (with \code{NULL}). 
} \examples{ x <- c(a = 1, b = 2, c = 3) expect_named(x) expect_named(x, c("a", "b", "c")) # Use options to control sensitivity expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE) # Can also check for the absence of names with NULL z <- 1:4 expect_named(z, NULL) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/test_path.Rd0000644000176200001440000000155215040747537015166 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/test-path.R \name{test_path} \alias{test_path} \title{Locate a file in the testing directory} \usage{ test_path(...) } \arguments{ \item{...}{Character vectors giving path components.} } \value{ A character vector giving the path. } \description{ Many tests require some external file (e.g. a \code{.csv} if you're testing a data import function) but the working directory varies depending on the way that you're running the test (e.g. interactively, with \code{devtools::test()}, or with \verb{R CMD check}). \code{test_path()} understands these variations and automatically generates a path relative to \code{tests/testthat}, regardless of where that directory might reside relative to the current working directory. 
} \examples{ \dontrun{ test_path("foo.csv") test_path("data", "foo.csv") } } testthat/man/TapReporter.Rd0000644000176200001440000000165015127561732015436 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-tap.R \name{TapReporter} \alias{TapReporter} \title{Report results in TAP format} \description{ This reporter will output results in the Test Anything Protocol (TAP), a simple text-based interface between testing modules in a test harness. For more information about TAP, see http://testanything.org } \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{ProgressReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/expect_vector.Rd0000644000176200001440000000233115072252215016026 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-vector.R \name{expect_vector} \alias{expect_vector} \title{Do you expect a vector with this size and/or prototype?} \usage{ expect_vector(object, ptype = NULL, size = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{ptype}{(Optional) Vector prototype to test against. 
Should be a size-0 (empty) generalised vector.} \item{size}{(Optional) Size to check for.} } \description{ \code{expect_vector()} is a thin wrapper around \code{\link[vctrs:vec_assert]{vctrs::vec_assert()}}, converting the results of that function in to the expectations used by testthat. This means that it used the vctrs of \code{ptype} (prototype) and \code{size}. See details in \url{https://vctrs.r-lib.org/articles/type-size.html} } \examples{ \dontshow{if (requireNamespace("vctrs")) withAutoprint(\{ # examplesIf} expect_vector(1:10, ptype = integer(), size = 10) show_failure(expect_vector(1:10, ptype = integer(), size = 5)) show_failure(expect_vector(1:10, ptype = character(), size = 5)) \dontshow{\}) # examplesIf} } testthat/man/expect_equivalent.Rd0000644000176200001440000000276515040747537016727 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-equality.R \name{expect_equivalent} \alias{expect_equivalent} \title{Is an object equal to the expected value, ignoring attributes?} \usage{ expect_equivalent( object, expected, ..., info = NULL, label = NULL, expected.label = NULL ) } \arguments{ \item{object, expected}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{...}{Passed on to \code{\link[=compare]{compare()}}.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label, expected.label}{Used to customise failure messages. For expert use only.} } \description{ Compares \code{object} and \code{expected} using \code{\link[=all.equal]{all.equal()}} and \code{check.attributes = FALSE}. 
} \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{expect_equivalent()} is deprecated in the 3rd edition. Instead use \code{expect_equal(ignore_attr = TRUE)}. } \examples{ #' # expect_equivalent() ignores attributes a <- b <- 1:3 names(b) <- letters[1:3] \dontrun{ expect_equal(a, b) } expect_equivalent(a, b) } \keyword{internal} testthat/man/ProgressReporter.Rd0000644000176200001440000000260715127561732016521 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reporter-progress.R \name{ProgressReporter} \alias{ProgressReporter} \alias{CompactProgressReporter} \alias{ParallelProgressReporter} \title{Report progress interactively} \description{ \code{ProgressReporter} is designed for interactive use. Its goal is to give you actionable insights to help you understand the status of your code. This reporter also praises you from time-to-time if all your tests pass. It's the default reporter for \code{\link[=test_dir]{test_dir()}}. \code{ParallelProgressReporter} is very similar to \code{ProgressReporter}, but works better for packages that want parallel tests. \code{CompactProgressReporter} is a minimal version of \code{ProgressReporter} designed for use with single files. It's the default reporter for \code{\link[=test_file]{test_file()}}. 
} \seealso{ Other reporters: \code{\link{CheckReporter}}, \code{\link{DebugReporter}}, \code{\link{FailReporter}}, \code{\link{JunitReporter}}, \code{\link{ListReporter}}, \code{\link{LlmReporter}}, \code{\link{LocationReporter}}, \code{\link{MinimalReporter}}, \code{\link{MultiReporter}}, \code{\link{RStudioReporter}}, \code{\link{Reporter}}, \code{\link{SilentReporter}}, \code{\link{SlowReporter}}, \code{\link{StopReporter}}, \code{\link{SummaryReporter}}, \code{\link{TapReporter}}, \code{\link{TeamcityReporter}} } \concept{reporters} testthat/man/capture_condition.Rd0000644000176200001440000000350715040747537016706 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/deprec-condition.R \name{capture_condition} \alias{capture_condition} \alias{capture_error} \alias{capture_expectation} \alias{capture_message} \alias{capture_warning} \alias{capture_messages} \alias{capture_warnings} \title{Capture conditions, including messages, warnings, expectations, and errors.} \usage{ capture_condition(code, entrace = FALSE) capture_error(code, entrace = FALSE) capture_expectation(code, entrace = FALSE) capture_message(code, entrace = FALSE) capture_warning(code, entrace = FALSE) capture_messages(code) capture_warnings(code, ignore_deprecation = FALSE) } \arguments{ \item{code}{Code to evaluate} \item{entrace}{Whether to add a \link[rlang:trace_back]{backtrace} to the captured condition.} } \value{ Singular functions (\code{capture_condition}, \code{capture_expectation} etc) return a condition object. \code{capture_messages()} and \code{capture_warnings} return a character vector of message text. } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} These functions allow you to capture the side-effects of a function call including printed output, messages and warnings. 
We no longer recommend that you use these functions, instead relying on the \code{\link[=expect_message]{expect_message()}} and friends to bubble up unmatched conditions. If you just want to silence unimportant warnings, use \code{\link[=suppressWarnings]{suppressWarnings()}}. } \examples{ f <- function() { message("First") warning("Second") message("Third") } capture_message(f()) capture_messages(f()) capture_warning(f()) capture_warnings(f()) # Condition will capture anything capture_condition(f()) } \keyword{internal} testthat/man/context.Rd0000644000176200001440000000234614164710002014640 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/context.R \name{context} \alias{context} \title{Describe the context of a set of tests.} \usage{ context(desc) } \arguments{ \item{desc}{description of context. Should start with a capital letter.} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} Use of \code{context()} is no longer recommended. Instead omit it, and messages will use the name of the file instead. This ensures that the context and test file name are always in sync. A context defines a set of tests that test related functionality. Usually you will have one context per file, but you may have multiple contexts in a single file if you so choose. } \section{3rd edition}{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} \code{context()} is deprecated in the third edition, and the equivalent information is instead recorded by the test file name. 
} \examples{ context("String processing") context("Remote procedure calls") } \keyword{internal} testthat/man/fail.Rd0000644000176200001440000000363415072252215014076 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{fail} \alias{fail} \alias{pass} \title{Declare that an expectation either passes or fails} \usage{ fail( message = "Failure has been forced", info = NULL, srcref = NULL, trace_env = caller_env(), trace = NULL ) pass() } \arguments{ \item{message}{A character vector describing the failure. The first element should describe the expected value, and the second (and optionally subsequence) elements should describe what was actually seen.} \item{info}{Character vector continuing additional information. Included for backward compatibility only and new expectations should not use it.} \item{srcref}{Location of the failure. Should only needed to be explicitly supplied when you need to forward a srcref captured elsewhere.} \item{trace_env}{If \code{trace} is not specified, this is used to generate an informative traceback for failures. You should only need to set this if you're calling \code{fail()} from a helper function; see \code{vignette("custom-expectation")} for details.} \item{trace}{An optional backtrace created by \code{\link[rlang:trace_back]{rlang::trace_back()}}. When supplied, the expectation is displayed with the backtrace. Expert use only.} } \description{ These are the primitives that you can use to implement your own expectations. Every path through an expectation should either call \code{pass()}, \code{fail()}, or throw an error (e.g. if the arguments are invalid). Expectations should always return \code{invisible(act$val)}. Learn more about creating your own expectations in \code{vignette("custom-expectation")}. 
} \examples{ expect_length <- function(object, n) { act <- quasi_label(rlang::enquo(object), arg = "object") act_n <- length(act$val) if (act_n != n) { fail(sprintf("\%s has length \%i, not length \%i.", act$lab, act_n, n)) } else { pass() } invisible(act$val) } } testthat/man/succeed.Rd0000644000176200001440000000137615072252215014577 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-that.R \name{succeed} \alias{succeed} \title{Mark a test as successful} \usage{ succeed(message = "Success has been forced", info = NULL) } \arguments{ \item{message}{A character vector describing the failure. The first element should describe the expected value, and the second (and optionally subsequence) elements should describe what was actually seen.} \item{info}{Character vector continuing additional information. Included for backward compatibility only and new expectations should not use it.} } \description{ This is an older version of \code{\link[=pass]{pass()}} that exists for backwards compatibility. You should now use \code{pass()} instead. } \keyword{internal} testthat/man/local_test_directory.Rd0000644000176200001440000000064614164710002017372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/local.R \name{local_test_directory} \alias{local_test_directory} \title{Locally set test directory options} \usage{ local_test_directory(path, package = NULL, .env = parent.frame()) } \arguments{ \item{path}{Path to directory of files} \item{package}{Optional package name, if known.} } \description{ For expert use only. 
} \keyword{internal} testthat/man/describe.Rd0000644000176200001440000000446515047715224014744 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe.R \name{describe} \alias{describe} \alias{it} \title{describe: a BDD testing language} \usage{ describe(description, code) it(description, code = NULL) } \arguments{ \item{description}{description of the feature} \item{code}{test code containing the specs} } \description{ A simple \href{https://en.wikipedia.org/wiki/Behavior-driven_development}{behavior-driven development (BDD)} \href{https://en.wikipedia.org/wiki/Domain-specific_language}{domain-specific language} for writing tests. The language is similar to \href{https://rspec.info/}{RSpec} for Ruby or \href{https://mochajs.org/}{Mocha} for JavaScript. BDD tests read like sentences and it should thus be easier to understand what the specification of a function/component is. } \details{ Tests using the \code{describe} syntax not only verify the tested code, but also document its intended behaviour. Each \code{describe} block specifies a larger component or function and contains a set of specifications. A specification is defined by an \code{it} block. Each \code{it} block functions as a test and is evaluated in its own environment. You can also have nested \code{describe} blocks. This test syntax helps to test the intended behaviour of your code. For example: you want to write a new function for your package. Try to describe the specification first using \code{describe}, before you write any code. After that, you start to implement the tests for each specification (i.e. the \code{it} block). Use \code{describe} to verify that you implement the right things and use \code{\link[=test_that]{test_that()}} to ensure you do the things right. 
} \examples{ describe("matrix()", { it("can be multiplied by a scalar", { m1 <- matrix(1:4, 2, 2) m2 <- m1 * 2 expect_equal(matrix(1:4 * 2, 2, 2), m2) }) it("can have not yet tested specs") }) # Nested specs: ## code addition <- function(a, b) a + b division <- function(a, b) a / b ## specs describe("math library", { describe("addition()", { it("can add two numbers", { expect_equal(1 + 1, addition(1, 1)) }) }) describe("division()", { it("can divide two numbers", { expect_equal(10 / 2, division(10, 2)) }) it("can handle division by 0") #not yet implemented }) }) } \keyword{internal} testthat/man/expect_error.Rd0000644000176200001440000001456315054053615015672 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-condition.R \name{expect_error} \alias{expect_error} \alias{expect_warning} \alias{expect_message} \alias{expect_condition} \title{Do you expect an error, warning, message, or other condition?} \usage{ expect_error( object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL ) expect_warning( object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = FALSE, info = NULL, label = NULL ) expect_message( object, regexp = NULL, class = NULL, ..., inherit = TRUE, all = FALSE, info = NULL, label = NULL ) expect_condition( object, regexp = NULL, class = NULL, ..., inherit = TRUE, info = NULL, label = NULL ) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{regexp}{Regular expression to test against. \itemize{ \item A character vector giving a regular expression that must match the error message. \item If \code{NULL}, the default, asserts that there should be an error, but doesn't test for a specific value. 
\item If \code{NA}, asserts that there should be no errors, but we now recommend using \code{\link[=expect_no_error]{expect_no_error()}} and friends instead. } Note that you should only use \code{message} with errors/warnings/messages that you generate. Avoid tests that rely on the specific text generated by another package since this can easily change. If you do need to test text generated by another package, either protect the test with \code{skip_on_cran()} or use \code{expect_snapshot()}.} \item{class}{Instead of supplying a regular expression, you can also supply a class name. This is useful for "classed" conditions.} \item{...}{ Arguments passed on to \code{\link[=expect_match]{expect_match}} \describe{ \item{\code{fixed}}{If \code{TRUE}, treats \code{regexp} as a string to be matched exactly (not a regular expressions). Overrides \code{perl}.} \item{\code{perl}}{logical. Should Perl-compatible regexps be used?} }} \item{inherit}{Whether to match \code{regexp} and \code{class} across the ancestry of chained errors.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} \item{all}{\emph{DEPRECATED} If you need to test multiple warnings/messages you now need to use multiple calls to \code{expect_message()}/ \code{expect_warning()}} } \value{ If \code{regexp = NA}, the value of the first argument; otherwise the captured condition. } \description{ \code{expect_error()}, \code{expect_warning()}, \code{expect_message()}, and \code{expect_condition()} check that code throws an error, warning, message, or condition with a message that matches \code{regexp}, or a class that inherits from \code{class}. See below for more details. In the 3rd edition, these functions match (at most) a single condition. 
All additional and non-matching (if \code{regexp} or \code{class} are used) conditions will bubble up outside the expectation. If these additional conditions are important you'll need to catch them with additional \code{expect_message()}/\code{expect_warning()} calls; if they're unimportant you can ignore with \code{\link[=suppressMessages]{suppressMessages()}}/\code{\link[=suppressWarnings]{suppressWarnings()}}. It can be tricky to test for a combination of different conditions, such as a message followed by an error. \code{\link[=expect_snapshot]{expect_snapshot()}} is often an easier alternative for these more complex cases. } \section{Testing \code{message} vs \code{class}}{ When checking that code generates an error, it's important to check that the error is the one you expect. There are two ways to do this. The first way is the simplest: you just provide a \code{regexp} that matches some fragment of the error message. This is easy, but fragile, because the test will fail if the error message changes (even if it's the same error). A more robust way is to test for the class of the error, if it has one. You can learn more about custom conditions at \url{https://adv-r.hadley.nz/conditions.html#custom-conditions}, but in short, errors are S3 classes and you can generate a custom class and check for it using \code{class} instead of \code{regexp}. If you are using \code{expect_error()} to check that an error message is formatted in such a way that it makes sense to a human, we recommend using \code{\link[=expect_snapshot]{expect_snapshot()}} instead. 
} \examples{ # Errors ------------------------------------------------------------------ f <- function() stop("My error!") expect_error(f()) expect_error(f(), "My error!") # You can use the arguments of grepl to control the matching expect_error(f(), "my error!", ignore.case = TRUE) # Note that `expect_error()` returns the error object so you can test # its components if needed err <- expect_error(rlang::abort("a", n = 10)) expect_equal(err$n, 10) # Warnings ------------------------------------------------------------------ f <- function(x) { if (x < 0) { warning("*x* is already negative") return(x) } -x } expect_warning(f(-1)) expect_warning(f(-1), "already negative") expect_warning(f(1), NA) # To test message and output, store results to a variable expect_warning(out <- f(-1), "already negative") expect_equal(out, -1) # Messages ------------------------------------------------------------------ f <- function(x) { if (x < 0) { message("*x* is already negative") return(x) } -x } expect_message(f(-1)) expect_message(f(-1), "already negative") expect_message(f(1), NA) } \seealso{ \code{\link[=expect_no_error]{expect_no_error()}}, \code{expect_no_warning()}, \code{expect_no_message()}, and \code{expect_no_condition()} to assert that code runs without errors/warnings/messages/conditions. 
Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_null}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_all_equal.Rd0000644000176200001440000000217615072252215016472 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-all.R \name{expect_all_equal} \alias{expect_all_equal} \alias{expect_all_true} \alias{expect_all_false} \title{Do you expect every value in a vector to have this value?} \usage{ expect_all_equal(object, expected) expect_all_true(object) expect_all_false(object) } \arguments{ \item{object, expected}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} } \description{ These expectations are similar to \code{expect_true(all(x == "x"))}, \code{expect_true(all(x))} and \code{expect_true(all(!x))} but give more informative failure messages if the expectations are not met. 
} \examples{ x1 <- c(1, 1, 1, 1, 1, 1) expect_all_equal(x1, 1) x2 <- c(1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2) show_failure(expect_all_equal(x2, 1)) # expect_all_true() and expect_all_false() are helpers for common cases set.seed(1016) show_failure(expect_all_true(rpois(100, 10) < 20)) show_failure(expect_all_false(rpois(100, 10) > 20)) } testthat/man/context_start_file.Rd0000644000176200001440000000050214164710002017044 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/context.R \name{context_start_file} \alias{context_start_file} \title{Start test context from a file name} \usage{ context_start_file(name) } \arguments{ \item{name}{file name} } \description{ For use in external reporters } \keyword{internal} testthat/man/expect_snapshot_value.Rd0000644000176200001440000000573715054412736017602 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-value.R \name{expect_snapshot_value} \alias{expect_snapshot_value} \title{Do you expect this code to return the same value as last time?} \usage{ expect_snapshot_value( x, style = c("json", "json2", "deparse", "serialize"), cran = FALSE, tolerance = testthat_tolerance(), ..., variant = NULL ) } \arguments{ \item{x}{Code to evaluate.} \item{style}{Serialization style to use: \itemize{ \item \code{json} uses \code{\link[jsonlite:fromJSON]{jsonlite::fromJSON()}} and \code{\link[jsonlite:fromJSON]{jsonlite::toJSON()}}. This produces the simplest output but only works for relatively simple objects. \item \code{json2} uses \code{\link[jsonlite:serializeJSON]{jsonlite::serializeJSON()}} and \code{\link[jsonlite:serializeJSON]{jsonlite::unserializeJSON()}} which are more verbose but work for a wider range of type. \item \code{deparse} uses \code{\link[=deparse]{deparse()}}, which generates a depiction of the object using R code. 
\item \code{serialize()} produces a binary serialization of the object using \code{\link[=serialize]{serialize()}}. This is all but guaranteed to work for any R object, but produces a completely opaque serialization. }} \item{cran}{Should these expectations be verified on CRAN? By default, they are not, because snapshot tests tend to be fragile because they often rely on minor details of dependencies.} \item{tolerance}{Numerical tolerance: any differences (in the sense of \code{\link[base:all.equal]{base::all.equal()}}) smaller than this value will be ignored. The default tolerance is \code{sqrt(.Machine$double.eps)}, unless long doubles are not available, in which case the test is skipped.} \item{...}{Passed on to \code{\link[waldo:compare]{waldo::compare()}} so you can control the details of the comparison.} \item{variant}{If non-\code{NULL}, results will be saved in \verb{_snaps/\{variant\}/\{test.md\}}, so \code{variant} must be a single string suitable for use as a directory name. You can use variants to deal with cases where the snapshot output varies and you want to capture and test the variations. Common use cases include variations for operating system, R version, or version of key dependency. Variants are an advanced feature. When you use them, you'll need to carefully think about your testing strategy to ensure that all important variants are covered by automated tests, and ensure that you have a way to get snapshot changes out of your CI system and back into the repo. Note that there's no way to declare all possible variants up front which means that as soon as you start using variants, you are responsible for deleting snapshot variants that are no longer used. (testthat will still delete all variants if you delete the test.)} } \description{ Captures the result of function, flexibly serializing it into a text representation that's stored in a snapshot file. See \code{\link[=expect_snapshot]{expect_snapshot()}} for more details on snapshot testing. 
} testthat/man/local_snapshotter.Rd0000644000176200001440000000076115127731656016721 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/snapshot-reporter.R \name{local_snapshotter} \alias{local_snapshotter} \title{Instantiate local snapshotting context} \usage{ local_snapshotter( snap_dir = "_snaps", reporter = SnapshotReporter, cleanup = FALSE, desc = NULL, fail_on_new = NULL, frame = caller_env() ) } \description{ Needed if you want to run snapshot tests outside of the usual testthat framework For expert use only. } \keyword{internal} testthat/man/auto_test.Rd0000644000176200001440000000275315047715224015201 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/auto-test.R \name{auto_test} \alias{auto_test} \alias{auto_test_package} \title{Watches code and tests for changes, rerunning tests as appropriate.} \usage{ auto_test( code_path, test_path, reporter = default_reporter(), env = test_env(), hash = TRUE ) auto_test_package(pkg = ".", reporter = default_reporter(), hash = TRUE) } \arguments{ \item{code_path}{path to directory containing code} \item{test_path}{path to directory containing tests} \item{reporter}{test reporter to use} \item{env}{environment in which to execute test suite.} \item{hash}{Passed on to \code{\link[=watch]{watch()}}. When FALSE, uses less accurate modification time stamps, but those are faster for large files.} \item{pkg}{path to package} } \description{ \ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#superseded}{\figure{lifecycle-superseded.svg}{options: alt='[Superseded]'}}}{\strong{[Superseded]}} The idea behind \code{auto_test()} is that you just leave it running while you develop your code. Every time you save a file it will be automatically tested and you can easily see if your changes have caused any test failures. 
The current strategy for rerunning tests is as follows: \itemize{ \item if any code has changed, then those files are reloaded and all tests rerun \item otherwise, each new or modified test is run } } \seealso{ \code{\link[=auto_test_package]{auto_test_package()}} } \keyword{internal} testthat/man/expect_null.Rd0000644000176200001440000000250715054053615015506 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-constant.R \name{expect_null} \alias{expect_null} \title{Do you expect \code{NULL}?} \usage{ expect_null(object, info = NULL, label = NULL) } \arguments{ \item{object}{Object to test. Supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} \item{info}{Extra information to be included in the message. This argument is soft-deprecated and should not be used in new code. Instead see alternatives in \link{quasi_label}.} \item{label}{Used to customise failure messages. For expert use only.} } \description{ This is a special case because \code{NULL} is a singleton so it's possible check for it either with \code{expect_equal(x, NULL)} or \code{expect_type(x, "NULL")}. 
} \examples{ x <- NULL y <- 10 expect_null(x) show_failure(expect_null(y)) } \seealso{ Other expectations: \code{\link{comparison-expectations}}, \code{\link{equality-expectations}}, \code{\link{expect_error}()}, \code{\link{expect_length}()}, \code{\link{expect_match}()}, \code{\link{expect_named}()}, \code{\link{expect_output}()}, \code{\link{expect_reference}()}, \code{\link{expect_silent}()}, \code{\link{inheritance-expectations}}, \code{\link{logical-expectations}} } \concept{expectations} testthat/man/expect_setequal.Rd0000644000176200001440000000364215104404205016347 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/expect-setequal.R \name{expect_setequal} \alias{expect_setequal} \alias{expect_mapequal} \alias{expect_contains} \alias{expect_in} \alias{expect_disjoint} \title{Do you expect a vector containing these values?} \usage{ expect_setequal(object, expected) expect_mapequal(object, expected) expect_contains(object, expected) expect_in(object, expected) expect_disjoint(object, expected) } \arguments{ \item{object, expected}{Computation and value to compare it to. Both arguments supports limited unquoting to make it easier to generate readable failures within a function or for loop. See \link{quasi_label} for more details.} } \description{ \itemize{ \item \code{expect_setequal(x, y)} tests that every element of \code{x} occurs in \code{y}, and that every element of \code{y} occurs in \code{x}. \item \code{expect_contains(x, y)} tests that \code{x} contains every element of \code{y} (i.e. \code{y} is a subset of \code{x}). \item \code{expect_in(x, y)} tests that every element of \code{x} is in \code{y} (i.e. \code{x} is a subset of \code{y}). \item \code{expect_disjoint(x, y)} tests that no element of \code{x} is in \code{y} (i.e. \code{x} is disjoint from \code{y}). \item \code{expect_mapequal(x, y)} treats lists as if they are mappings between names and values. 
Concretely, checks that \code{x} and \code{y} have the same names, then checks that \code{x[names(y)]} equals \code{y}. } } \details{ Note that \code{expect_setequal()} ignores names, and you will be warned if both \code{object} and \code{expected} have them. } \examples{ expect_setequal(letters, rev(letters)) show_failure(expect_setequal(letters[-1], rev(letters))) x <- list(b = 2, a = 1) expect_mapequal(x, list(a = 1, b = 2)) show_failure(expect_mapequal(x, list(a = 1))) show_failure(expect_mapequal(x, list(a = 1, b = "x"))) show_failure(expect_mapequal(x, list(a = 1, b = 2, c = 3))) } testthat/DESCRIPTION0000644000176200001440000000336715130664352013636 0ustar liggesusersPackage: testthat Title: Unit Testing for R Version: 3.3.2 Authors@R: c( person("Hadley", "Wickham", , "hadley@posit.co", role = c("aut", "cre")), person("Posit Software, PBC", role = c("cph", "fnd")), person("R Core team", role = "ctb", comment = "Implementation of utils::recover()") ) Description: Software testing is important, but, in part because it is frustrating and boring, many of us avoid it. 'testthat' is a testing framework for R that is easy to learn and use, and integrates with your existing 'workflow'. 
License: MIT + file LICENSE URL: https://testthat.r-lib.org, https://github.com/r-lib/testthat BugReports: https://github.com/r-lib/testthat/issues Depends: R (>= 4.1.0) Imports: brio (>= 1.1.5), callr (>= 3.7.6), cli (>= 3.6.5), desc (>= 1.4.3), evaluate (>= 1.0.4), jsonlite (>= 2.0.0), lifecycle (>= 1.0.4), magrittr (>= 2.0.3), methods, pkgload (>= 1.4.0), praise (>= 1.0.0), processx (>= 3.8.6), ps (>= 1.9.1), R6 (>= 2.6.1), rlang (>= 1.1.6), utils, waldo (>= 0.6.2), withr (>= 3.0.2) Suggests: covr, curl (>= 0.9.5), diffviewer (>= 0.1.0), digest (>= 0.6.33), gh, knitr, otel, otelsdk, rmarkdown, rstudioapi, S7, shiny, usethis, vctrs (>= 0.1.0), xml2 VignetteBuilder: knitr Config/Needs/website: tidyverse/tidytemplate Config/testthat/edition: 3 Config/testthat/parallel: true Config/testthat/start-first: watcher, parallel* Encoding: UTF-8 RoxygenNote: 7.3.3 NeedsCompilation: yes Packaged: 2026-01-09 17:49:32 UTC; hadleywickham Author: Hadley Wickham [aut, cre], Posit Software, PBC [cph, fnd], R Core team [ctb] (Implementation of utils::recover()) Maintainer: Hadley Wickham <hadley@posit.co> Repository: CRAN Date/Publication: 2026-01-11 09:10:02 UTC