testthat/0000755000175100001440000000000012607405030012125 5ustar hornikuserstestthat/inst/0000755000175100001440000000000012247657347013126 5ustar hornikuserstestthat/inst/CITATION0000644000175100001440000000117412247657716014266 0ustar hornikuserscitHeader("To cite the testthat package in publications, use:") citEntry( entry = "Article", author = personList(as.person("Hadley Wickham")), title = "testthat: Get Started with Testing", journal = "The R Journal", year = 2011, volume = 3, pages = "5--10", url = "http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf", textVersion = paste( "Hadley Wickham. testthat: Get Started with Testing.", "The R Journal, vol. 3, no. 1, pp. 5--10, 2011" ) ) citFooter("As testthat is continually evolving, you may want to cite its version number. Find it with 'help(package=testthat)'.") testthat/tests/0000755000175100001440000000000012453747623013307 5ustar hornikuserstestthat/tests/testthat.R0000644000175100001440000000011712453747623015271 0ustar hornikuserslibrary(testthat) options(testthat.use_colours = FALSE) test_check("testthat") testthat/tests/testthat/0000755000175100001440000000000012607405030015127 5ustar hornikuserstestthat/tests/testthat/test-compare.r0000644000175100001440000000362012602540002017710 0ustar hornikuserscontext("Compare") test_that("Equal strings with different attributes are not equal", { expect_false(compare(structure("text", y = "foo"), "text")$equal) }) test_that("different types of missing values are not equal", { expect_false(compare(NA, NA_character_)$equal) }) test_that("equal if both missing or both the same", { expect_true(compare(NA_character_, NA_character_)$equal) expect_true(compare("ABC", "ABC")$equal) expect_false(compare("ABC", NA_character_)$equal) expect_false(compare(NA_character_, "ABC")$equal) }) test_that("equal if both missing or both the same (multiple values)", { expect_true(compare(c("ABC", NA), c("ABC", NA))$equal) expect_false(compare(c(NA, NA), c("ABC", NA))$equal) expect_false(compare(c("AB", NA), c("ABC", NA))$equal) expect_false(compare(c("AB", "AB"), c("ABC", "AB"))$equal) }) test_that("computes correct number of mismatches", { comp <- compare(c("a","b","c"), c("c", "d", "e")) expect_match(comp$message, "3 string mismatches") }) test_that("comparing character and non-character fails back to all.equal", { expect_match(compare("abc", 1)$message, "target is character") }) test_that("unnamed arguments to all.equal passed through correctly", { expect_equal(415, 416, 0.01) }) test_that("comparing long character vectors with few differences", { cmp <- compare(letters, c(letters[-26], "a")) expect_match( cmp$message, paste("^", " string mismatch", "\\nx", "\\ny", "$", sep = "[^\\n]*")) }) test_that("comparing character vectors of different length", { cmp <- compare(letters, letters[-26]) expect_match( cmp$message, paste("^", "Lengths ", " differ\\n", " string mismatch", "\\nx", "$", sep = "[^\\n]*")) cmp <- compare(letters[-25:-26], letters) expect_match( cmp$message, paste("^", "Lengths ", " differ\\n", " string mismatch", "\\ny", "\\n", "\\ny", "$", sep = "[^\\n]*")) }) testthat/tests/testthat/test-reporter.r0000644000175100001440000000076112247653572020155 0ustar hornikuserscontext("Reporter") test_that("can locate reporter from name", { expect_that(find_reporter("minimal"), equals(MinimalReporter$new())) expect_that(find_reporter("summary"), equals(SummaryReporter$new())) expect_that(find_reporter("tap"), equals(TapReporter$new())) expect_that(find_reporter("list"), equals(ListReporter$new())) expect_that(find_reporter("multi"), equals(MultiReporter$new())) expect_that(find_reporter("blah"), throws_error("Can not find test reporter blah")) }) testthat/tests/testthat/test-reporter-multi.r0000644000175100001440000000066312247654663021310 0ustar hornikuserscontext("MultiReporter") test_that("MultiReporter", { reports <- lapply(seq_len(3), function(x) ListReporter$new()) reporter <- MultiReporter$new(reporters = reports) test_file("context.r", reporter) dfs <- lapply(reports, function(x) x$get_summary()) dfs2 <- lapply(dfs, function(x) { x$user <- x$system <- x$real <- NULL x }) expect_true(all(sapply(dfs2, function(x) identical(dfs2[[1]], x) ))) }) testthat/tests/testthat/test_dir/0000755000175100001440000000000012602555543016756 5ustar hornikuserstestthat/tests/testthat/test_dir/test-errors.r0000644000175100001440000000053212247654663021442 0ustar hornikuserscontext('error') test_that('simple', { stop('argh') }) test_that('after one success', { expect_true(TRUE) stop('argh') expect_true(TRUE) }) test_that('after one failure', { expect_true(FALSE) stop('argh') }) test_that('in the test', { expect_true(stop('Argh')) }) test_that('in expect_error', { expect_error(stop('Argh')) }) testthat/tests/testthat/test_dir/test-helper.r0000644000175100001440000000023512602544023021364 0ustar hornikusers# test that the companion helper script is sourced by test_dir context('helper') test_that('helper test', { expect_equal(hello(), 'Hello World') }) testthat/tests/testthat/test_dir/test-empty.r0000644000175100001440000000014312602302730021236 0ustar hornikuserscontext('empty') test_that('empty test', NULL) test_that('empty test with error', stop('Argh')) testthat/tests/testthat/test_dir/test-basic.r0000644000175100001440000000107512602544023021171 0ustar hornikuserscontext("Basic") test_that("logical tests act as expected", { expect_that(TRUE, is_true()) expect_that(FALSE, is_false()) }) test_that("logical tests ignore attributes", { expect_that(c(a = TRUE), is_true()) expect_that(c(a = FALSE), is_false()) }) test_that("equality holds", { expect_that(5, equals(5)) expect_that(10, is_identical_to(10)) }) test_that("can't access variables from other tests 2", { a <- 10 }) test_that("can't access variables from other tests 1", { expect_that(exists("a"), is_false()) }) testthat/tests/testthat/test_dir/helper_hello.r0000644000175100001440000000004212247654663021607 0ustar hornikusershello <- function() 'Hello World' testthat/tests/testthat/test_dir/test-failures.r0000644000175100001440000000036412247654663021743 0ustar hornikuserscontext('failures') test_that('just one failure', { expect_true(FALSE) }) test_that('one failure on two', { expect_false(FALSE) expect_true(FALSE) }) test_that('no failure', { expect_false(FALSE) expect_true(TRUE) }) testthat/tests/testthat/test_dir/test-bare-expectations.r0000644000175100001440000000007612247654663023546 0ustar hornikuserscontext("Bare") expect_that(1, equals(1)) expect_equal(2, 2) testthat/tests/testthat/test_dir/test-skip.r0000644000175100001440000000015612602555543021066 0ustar hornikuserscontext("skip") test_that("Skips skip", { skip("Skipping to avoid certain failure") expect_true(FALSE) })testthat/tests/testthat/test-silence.R0000644000175100001440000000015012602537442017655 0ustar hornikuserscontext("Silence") test_that("Nothing to see here", { message("YOU SHOULDN'T SEE ME") succeed() }) testthat/tests/testthat/test-basics.r0000644000175100001440000000103112602554777017550 0ustar hornikuserscontext("Basic tests") test_that("logical tests act as expected", { expect_that(TRUE, is_true()) expect_that(FALSE, is_false()) }) test_that("logical tests ignore attributes", { expect_that(c(a = TRUE), is_true()) expect_that(c(a = FALSE), is_false()) }) test_that("equality holds", { expect_that(5, equals(5)) expect_that(10, is_identical_to(10)) }) test_that("can't access variables from other tests 2", { a <- 10 }) test_that("can't access variables from other tests 1", { expect_that(exists("a"), is_false()) }) testthat/tests/testthat/test-mock.r0000644000175100001440000001240212602564565017235 0ustar hornikuserscontext("Mock") test_that("can make 3 = 5", { with_mock( compare = function(x, y, ...) list(equal = TRUE, message = "TRUE"), expect_equal(3, 5) ) expect_that(5, not(equals(3))) }) test_that("mocked function is restored on error", { expect_error( with_mock( compare = function(x, y, ...) list(equal = TRUE, message = "TRUE"), stop("Simulated error") ), "Simulated error" ) expect_that(5, not(equals(3))) }) test_that("non-empty mock with return value", { expect_true(with_mock( compare = function(x, y, ...) list(equal = TRUE, message = "TRUE"), TRUE )) }) test_that("multi-mock", { with_mock( gives_warning = throws_error, { expect_warning(stopifnot(compare(3, 5)$equal)) } ) expect_warning(warning("test")) }) test_that("nested mock", { with_mock( all.equal = function(x, y, ...) TRUE, { with_mock( gives_warning = throws_error, { expect_warning(stopifnot(!compare(3, "a")$equal)) } ) }, .env = asNamespace("base") ) expect_false(isTRUE(all.equal(3, 5))) expect_warning(warning("test")) }) test_that("qualified mock names", { with_mock( gives_warning = throws_error, `base::all.equal` = function(x, y, ...) TRUE, { expect_warning(stopifnot(!compare(3, "a")$equal)) } ) with_mock( `testthat::gives_warning` = throws_error, all.equal = function(x, y, ...) TRUE, { expect_warning(stopifnot(!compare(3, "a")$equal)) }, .env = asNamespace("base") ) expect_false(isTRUE(all.equal(3, 5))) expect_warning(warning("test")) }) test_that("can't mock non-existing", { expect_error(with_mock(`base::..bogus..` = identity, TRUE), "Function [.][.]bogus[.][.] not found in environment base") expect_error(with_mock(..bogus.. = identity, TRUE), "Function [.][.]bogus[.][.] not found in environment testthat") }) test_that("can't mock non-function", { expect_error(with_mock(.bg_colours = FALSE, TRUE), "Function [.]bg_colours not found in environment testthat") }) test_that("empty or no-op mock", { suppressWarnings({ expect_that(with_mock(), equals(invisible(NULL))) expect_that(with_mock(TRUE), equals(TRUE)) expect_that(with_mock(invisible(5)), equals(invisible(5))) }) expect_that(with_mock(), gives_warning("Not mocking anything.")) expect_that(with_mock(TRUE), gives_warning("Not mocking anything.")) expect_that(with_mock(invisible(5)), gives_warning("Not mocking anything.")) }) test_that("multiple return values", { expect_true(with_mock(FALSE, TRUE, `base::identity` = identity)) expect_equal(with_mock(3, `base::identity` = identity, 5), 5) }) test_that("can access variables defined in function", { x <- 5 suppressWarnings(expect_equal(with_mock(x), 5)) }) test_that("can mock both qualified and unqualified functions", { with_mock(`stats::acf` = identity, expect_identical(stats::acf, identity)) with_mock(`stats::acf` = identity, expect_identical(acf, identity)) with_mock(acf = identity, expect_identical(stats::acf, identity), .env = "stats") with_mock(acf = identity, expect_identical(acf, identity), .env = "stats") }) test_that("can mock hidden functions", { with_mock(`stats:::add1.default` = identity, expect_identical(stats:::add1.default, identity)) }) test_that("can mock if package is not loaded", { if ("package:devtools" %in% search()) skip('devtools is loaded') skip_if_not_installed("devtools") with_mock(`devtools::add_path` = identity, expect_identical(devtools::add_path, identity)) }) test_that("changes to variables are preserved between calls and visible outside", { x <- 1 with_mock( `base::identity` = identity, x <- 3, expect_equal(x, 3) ) expect_equal(x, 3) }) test_that("can mock function imported from other package", { with_mock(`testthat::setRefClass` = identity, expect_identical(setRefClass, identity)) with_mock(`methods::setRefClass` = identity, expect_identical(setRefClass, identity)) }) test_that("mock extraction", { expect_identical(extract_mocks(list(identity = identity), asNamespace("base"))$identity$name, as.name("identity")) expect_error(extract_mocks(list(..bogus.. = identity), asNamespace("base")), "Function [.][.]bogus[.][.] not found in environment base") expect_identical(extract_mocks(list(`base::identity` = identity), NULL)[[1]]$name, as.name("identity")) expect_identical(extract_mocks(list(`base::identity` = identity), NULL)[[1]]$env, asNamespace("base")) expect_identical(extract_mocks(list(identity = stop), "base")[[1]]$env, asNamespace("base")) expect_identical(extract_mocks(list(identity = stop), asNamespace("base"))[[1]]$env, asNamespace("base")) expect_identical(extract_mocks(list(`base::identity` = stop), NULL)[[1]]$orig_value, identity) expect_identical(extract_mocks(list(`base::identity` = stop), NULL)[[1]]$new_value, stop) expect_identical(extract_mocks(list(`base::identity` = stop), "stats")[[1]]$new_value, stop) expect_identical(extract_mocks(list(acf = identity), "stats")[[1]]$new_value, identity) expect_equal(length(extract_mocks(list(not = identity, `base::!` = identity), "testthat")), 2) }) test_that("mocks can access local variables", { value <- TRUE with_mock( expect_equal(2 * 3, 4), all.equal = function(x, y, ...) {value} ) }) testthat/tests/testthat/test-reporter-tap.r0000644000175100001440000000071212602555543020725 0ustar hornikuserscontext("TAP reporter") test_that("TAP reporter handles context and pass/fail/skip", { tap.report <- capture.output(test_dir("test_dir", reporter = "tap")) expect_identical(tap.report[1], "1..24") expect_identical(tap.report[2], "# Context Bare ") expect_true("ok 7 equality holds " %in% tap.report) expect_true("not ok 10 empty test with error " %in% tap.report) expect_true("ok 24 # SKIP Skipping to avoid certain failure " %in% tap.report) })testthat/tests/testthat/test-xxx.r0000644000175100001440000000311012453752173017125 0ustar hornikusers# Test that test_that succeeds or fails as expected. test_test_that <- function(desc, expr, failure_expected = TRUE) { reporter <- SilentReporter$new() old_reporter <- set_reporter(reporter) test_that(desc, expr) set_reporter(old_reporter) test_that(desc, { if (failure_expected) { info <- 'Test succeeded when failure expected' expect_equal(length(reporter$failures), 1, info = info) } else { info <- sprintf( 'Test failed unexpectedly: %s', as.character(reporter$failures[[desc]])) expect_equal(length(reporter$failures), 0, info = info) } }) } context("Testing test_that") test_test_that("false is false", { expect_that(FALSE, is_false()) }, failure_expected = FALSE) test_test_that("false is not true", { expect_that(FALSE, is_true()) }) test_test_that("true is not false", { expect_that(TRUE, is_false()) }) test_test_that("1 equals 1", { expect_that(1, equals(1)) }, failure_expected = FALSE) test_test_that("1 does not equal 2", { expect_that(1, equals(2)) }) test_test_that("fail fails", { fail() }) test_test_that("succeed suceeds", { suceeds() }, failure_expected = TRUE) test_test_that("random errors are caught", { function_that_doesnt_exist() }) f <- function() g() g <- function() stop("I made a mistake", call. = FALSE) test_test_that("errors are captured", { f() }) test_test_that("errors when looking for warnings propagte", { f <- function() stop("!") expect_warning(f()) }) test_test_that("NULL doesn't match text", { expect_error(expect_match(NULL, 'oeu'), "NULL does not match") }) testthat/tests/testthat/test-colour.r0000644000175100001440000000135712453002234017576 0ustar hornikuserscontext("Colours") test_that("We have colours if we want to", { op <- options( crayon.enabled = TRUE, testthat.use_colours = TRUE ) c1 <- crayon::has_style(colourise("X", "passed")) c2 <- crayon::has_style(colourise("X", "skipped")) c3 <- crayon::has_style(colourise("X", "error")) options(op) expect_true(c1) expect_true(c2) expect_true(c3) }) test_that("We don't have colours if we don't want to", { op <- options( crayon.enabled = TRUE, testthat.use_colours = FALSE ) c1 <- crayon::has_style(colourise("X", "passed")) c2 <- crayon::has_style(colourise("X", "skipped")) c3 <- crayon::has_style(colourise("X", "error")) options(op) expect_false(c1) expect_false(c2) expect_false(c3) }) testthat/tests/testthat/test-expectations.r0000644000175100001440000000700112602570054020777 0ustar hornikuserscontext("Expectations") test_that("errors are caught with throws_error", { res <- throws_error()(stop()) expect_that(res$passed, is_true()) res <- throws_error("Yes")(stop("Yes")) expect_that(res$passed, is_true()) res <- throws_error("Yes")(stop("No")) expect_that(res$passed, is_false()) }) test_that("failure to throw an error is a failure", { res <- throws_error()(log(1)) expect_that(res$passed, is_false()) res <- throws_error("error")(log(1)) expect_that(res$passed, is_false()) res <- throws_error()(NULL) expect_that(res$passed, is_false()) res <- throws_error("error")(NULL) expect_that(res$passed, is_false()) }) test_that("warnings are caught by gives_warning", { f <- function() { warning("a") } g <- function() { warning("a") warning("b") } expect_that(f(), gives_warning()) expect_that(f(), gives_warning("a")) expect_that(g(), gives_warning("a")) expect_that(g(), gives_warning("b")) }) test_that("messages are caught by shows_message", { f <- function() { message("a") } g <- function() { message("a") message("b") } expect_that(f(), shows_message()) expect_that(f(), shows_message("a")) expect_that(g(), shows_message("a")) expect_that(g(), shows_message("b")) }) test_that("matching expectations with NA", { f <- function() {} expect_error(f(), NA) expect_warning(f(), NA) expect_message(f(), NA) expect_output(f(), NA) }) test_that("shows_mesage / gives_warning work when no messages are generated", { expect_identical(gives_warning("a")(1)$failure_msg, "no warnings given") expect_identical(shows_message("a")(1)$failure_msg, "no messages shown") }) test_that("expect_output captures multiline code", { expect_output(cat("1\n2"), "1\n2") }) test_that("expect_output prints by default", { expect_output(1, "1") }) test_that("expect_equals fails with useful message if objects equal but not identical", { f <- function() x g <- function() x exp <- is_identical_to(f)(g) expect_false(exp$passed) expect_match(exp$failure, "not identical") }) test_that("extra arguments to matches passed onto grepl", { expect_match("te*st", "e*", fixed = TRUE) expect_match("test", "TEST", ignore.case = TRUE) expect_that("te*st", matches("e*", fixed = TRUE)) expect_that("test", matches("TEST", ignore.case = TRUE)) }) test_that("for messages, warnings, errors and output, ... passed onto grepl", { expect_output(print("X"), "x", ignore.case = TRUE) expect_message(message("X"), "x", ignore.case = TRUE) expect_warning(warning("X"), "x", ignore.case = TRUE) expect_error(stop("X"), "x", ignore.case = TRUE) }) test_that("expect_equal_to_reference correctly matches to a file", { expect_equal_to_reference(1, "one.rds") expect_that(1, equals_reference("one.rds")) }) test_that("first expect_equal_to_reference is successful", { expect_equal_to_reference(1, "two.rds") unlink("two.rds") }) test_that("expect_null checks for NULLs", { expect_null(NULL) }) test_that("takes_less_than verifies duration", { expect_that(1, takes_less_than(1)) }) test_that("expect_silent checks for out", { expect_silent("") }) test_that("expected_named verifies presence of names", { expect_named(c(a = 1)) }) test_that("expected_named verifies actual of names", { expect_named(c(a = 1), "a") }) test_that("expected_named optionally ignores case", { expect_named(c(a = 1), "A", ignore.case = TRUE) }) test_that("expected_named optionally ignores order", { expect_named(c(a = 1, b = 2), c("b", "a"), ignore.order = TRUE) }) testthat/tests/testthat/one.rds0000644000175100001440000000005412406573743016437 0ustar hornikusers‹‹àb```b`fdd`b2ø€˜ÑþCÝÁtestthat/tests/testthat/test-context.r0000644000175100001440000000212312453266353017765 0ustar hornikuserscontext("Contexts") env <- new.env() env$.packageName <- "testthat" CountReporter <- setRefClass("CountReporter", contains = "Reporter", where = env, fields = c("context_count", "test_count", "context_i", "test_i"), methods = list( initialize = function() { context_i <<- 0 context_count <<- 0 test_i <<- 0 test_count <<- 0 callSuper() }, start_context = function(desc) { context_count <<- context_count + 1 context_i <<- context_i + 1 }, end_context = function() { context_i <<- context_i - 1 stopifnot(context_i >= 0) }, start_test = function(desc) { test_count <<- test_count + 1 test_i <<- test_i + 1 }, end_test = function() { test_i <<- test_i - 1 stopifnot(test_i >= 0) } ) ) test_that("contexts are opened, then closed", { report <- CountReporter$new() test_file("context.r", report) expect_that(report$context_count, equals(2)) expect_that(report$context_i, equals(0)) expect_that(report$test_count, equals(4)) expect_that(report$test_i, equals(0)) }) testthat/tests/testthat/test-expect_that.r0000644000175100001440000000062312270001574020601 0ustar hornikuserscontext("expect_that") test_that("expect_that with vector info stops", { expect_error(expect_true(FALSE, info = c('info1', 'info2')), 'length\\(info\\) <= 1') expect_error(expect_true(FALSE, label = c('label1', 'label2')), 'length\\(label\\) <= 1') }) test_that("expect_that returns the result", { res <- expect_true(TRUE) expect_true(is.list(res) && res$passed && !res$error) })testthat/tests/testthat/test-helpers.R0000644000175100001440000000016312453751334017703 0ustar hornikuserscontext("helpers") # See helper-assign.R test_that("helpers run before tests", { expect_equal(abcdefghi, 10) }) testthat/tests/testthat/test-watcher.r0000644000175100001440000000522012602533536017733 0ustar hornikuserscontext("Watcher components") test_that("compare state works correctly", { loc <- tempfile("watcher") dir.create(loc) empty <- dir_state(loc) expect_that(length(empty), equals(0)) file.create(file.path(loc, "test-1.txt")) one <- dir_state(loc) expect_that(length(one), equals(1)) expect_that(basename(names(one)), equals("test-1.txt")) diff <- compare_state(empty, one) expect_that(diff$n, equals(1)) expect_that(basename(diff$added), equals("test-1.txt")) write.table(mtcars, file.path(loc, "test-1.txt")) diff <- compare_state(one, dir_state(loc)) expect_that(diff$n, equals(1)) expect_that(basename(diff$modified), equals("test-1.txt")) file.rename(file.path(loc, "test-1.txt"), file.path(loc, "test-2.txt")) diff <- compare_state(one, dir_state(loc)) expect_that(diff$n, equals(2)) expect_that(basename(diff$deleted), equals("test-1.txt")) expect_that(basename(diff$added), equals("test-2.txt")) diff <- compare_state(c(file1 = "62da2", file2 = "e14a6", file3 = "6e6dd"), c(file1 = "62da2", file2 = "e14a6", file21= "532fa", file3 = "3f4sa")) expect_that(diff$n, equals(2)) expect_that(basename(diff$added), equals("file21")) expect_that(basename(diff$modified), equals("file3")) }) test_that("watcher works correctly", { skip_on_cran() if (Sys.which("bash") == "") { skip("bash not available") } if (system("bash -c 'which touch'", ignore.stdout = TRUE) != 0L) { skip("touch (or which) not available") } loc <- tempfile("watcher", tmpdir = "/tmp") dir.create(loc) code_path = file.path(loc, "R") test_path = file.path(loc, "tests") dir.create(code_path) dir.create(test_path) delayed.bash.cmd <- function(command) { system(paste0("bash -c 'sleep 1;", command, "'"), wait=FALSE) } add.code.file <- function(file.name) { delayed.bash.cmd(paste0("touch ", file.path(code_path, file.name))) } remove.code.file <- function(file.name) { delayed.bash.cmd(paste0("rm ", file.path(code_path, file.name))) } test.added <- function(added, deleted, modified) { expect_that(length(added), equals(1)) expect_that(grepl("test1.R", added), is_true()) expect_that(length(deleted), equals(0)) expect_that(length(modified), equals(0)) FALSE } test.removed <- function(added, deleted, modified) { expect_that(length(added), equals(0)) expect_that(length(deleted), equals(1)) expect_that(grepl("test1.R", deleted), is_true()) expect_that(length(modified), equals(0)) FALSE } add.code.file("test1.R") watch(c(code_path, test_path), test.added) remove.code.file("test1.R") watch(c(code_path, test_path), test.removed) }) testthat/tests/testthat/test-negations.r0000644000175100001440000000164512247653466020306 0ustar hornikuserscontext("Negations") test_that("not(throws_error) fails with errors", { res <- not(throws_error())(stop()) expect_that(res$passed, is_false()) res <- not(throws_error("Yes"))(stop("Yes")) expect_that(res$passed, is_false()) res <- not(throws_error("Yes"))(stop("No")) expect_that(res$passed, is_true()) }) test_that("not(gives_warning) is the opposite of gives_warning", { h <- function() { warning("a") } res <- not(gives_warning())(h()) expect_false(res$passed) res <- not(gives_warning("a"))(h()) expect_false(res$passed) res <- not(gives_warning("c"))(h()) expect_true(res$passed) }) test_that("not(shows_message) is the opposite of shows_message", { h <- function() { message("a") } res <- not(shows_message())(h()) expect_false(res$passed) res <- not(shows_message("a"))(h()) expect_false(res$passed) res <- not(shows_message("c"))(h()) expect_true(res$passed) }) testthat/tests/testthat/context.r0000644000175100001440000000053712247653466017025 0ustar hornikuserscontext("First context.") test_that("Logical equivalence", { x <- TRUE expect_that(x, equals(TRUE)) }) test_that("Numerical equivalence", { x <- 1 expect_that(x, equals(1)) }) context("Second context.") test_that("A passing test", { expect_that(TRUE, equals(TRUE)) }) test_that("A failing test", { expect_that(TRUE, equals(FALSE)) }) testthat/tests/testthat/test-describe.r0000644000175100001440000000264412325225031020053 0ustar hornikuserscontext("Describe") someExternalVariable <- 1 describe("describe", { it("can contain nested describe blocks", { describe("addition", { it("should be able to add two numbers", { expect_equivalent(2, 1 + 1) }) describe("sub feature", { it("should also work", { expect_equivalent(2, 1 + 1) }) }) }) }) it("can have not yet implemented specs", { describe("Millennium Prize Problems", { it("can be shown that P != NP") }) }) it("has to have a description for the block", { expect_that(describe({ }), throws_error()) expect_that(describe("",{ }), throws_error()) expect_that(describe("test", { it() }), throws_error()) expect_that(describe("test", { it("") }), throws_error()) }) it("has to have a description of length 1", { expect_that(describe(c("a", "b"), {}), throws_error()) expect_that(describe("test", { it(c("a", "b")) }), throws_error()) }) someInternalVariable <- 1 it("should be possible to use variables from outer environments", { expect_equivalent(1, someExternalVariable) expect_equivalent(1, someInternalVariable) }) it("should not be possible to access variables from other specs (1)", { some_test_var <- 5 }) it("should not be possible to access variables from other specs (2)", { expect_that(exists("some_test_var"), is_false()) }) }) testthat/tests/testthat/helper-assign.R0000644000175100001440000000002012453750771020021 0ustar hornikusersabcdefghi <- 10 testthat/tests/testthat/test-source_dir.r0000644000175100001440000000101612247654663020445 0ustar hornikuserscontext("source_dir") test_that('source_dir()', { res <- source_dir('test_dir', pattern = 'hello', chdir = TRUE) expect_equal(res[[1]](), "Hello World") res <- source_dir(normalizePath('test_dir'), pattern = 'hello', chdir = TRUE) expect_equal(res[[1]](), "Hello World") res <- source_dir('test_dir', pattern = 'hello', chdir = FALSE) expect_equal(res[[1]](), "Hello World") res <- source_dir(normalizePath('test_dir'), pattern = 'hello', chdir = FALSE) expect_equal(res[[1]](), "Hello World") }) testthat/tests/testthat/test-bare.r0000644000175100001440000000061412606743704017215 0ustar hornikuserscontext("Bare expectations") expect_that(1, equals(1)) expect_equal(2, 2) expect_that(2, is_less_than(3)) expect_that(3, is_more_than(2)) expect_more_than(3, 2) expect_gt(3, 2) expect_gte(3, 3) expect_less_than(2, 3) expect_lt(2, 3) expect_lte(2, 2) # test_that("Error", { # stop("!") # }) # # test_that("Failure", { # expect_true(FALSE) # }) # # test_that("Skip", { # skip("Abc") # }) testthat/tests/testthat/test-environment.R0000644000175100001440000000026412453747623020615 0ustar hornikuserscontext("Environment") env <- new.env() setClass("MyClass", where = env) test_that("Cannot create S4 class without special behaviour", { setClass("MyClass2", where = env) }) testthat/tests/testthat/test-test_dir.r0000644000175100001440000000405412602555543020121 0ustar hornikuserscontext("test_dir") test_that('test_dir()', { res <- test_dir('test_dir', reporter = 'silent') df <- as.data.frame(res) expected <- data.frame(file = c("test-basic.r", "test-basic.r", "test-basic.r", "test-basic.r", "test-basic.r", "test-empty.r", "test-empty.r", "test-errors.r", "test-errors.r", "test-errors.r", "test-errors.r", "test-errors.r", "test-failures.r", "test-failures.r", "test-failures.r", "test-helper.r", "test-skip.r"), context = c("Basic", "Basic", "Basic", "Basic", "Basic", "empty", "empty", "error", "error", "error", "error", "error", "failures", "failures", "failures", "helper", "skip"), test = c("logical tests act as expected", "logical tests ignore attributes", "equality holds", "can't access variables from other tests 2", "can't access variables from other tests 1", "empty test", "empty test with error", "simple", "after one success", "after one failure", "in the test", "in expect_error", "just one failure", "one failure on two", "no failure", "helper test", "Skips skip"), nb = c(2L, 2L, 2L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 2L, 2L, 1L, 1L), failed = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 0L, 0L), skipped = c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE), error = c(FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE), stringsAsFactors = FALSE) df$user <- df$system <- df$real <- NULL expect_identical(df, expected) }) test_that('test_dir() filter', { res <- test_dir('test_dir', reporter = 'silent', filter = 'basic|empty') df <- as.data.frame(res) expect_identical(unique(df$context), c("Basic", "empty")) }) test_that('test_dir() helpers', { res <- test_dir('test_dir', reporter = 'silent', filter = 'helper') df <- as.data.frame(res) expect_true(all(!df$error & df$failed == 0)) }) testthat/tests/testthat/test-reporter-list.r0000644000175100001440000000133412602565515021115 0ustar hornikuserscontext("ListReporter") test_that("ListReporter with test_file", { report <- ListReporter$new() report$start_file('context.r') test_file("context.r", report) res <- report$results expect_is(res, 'list') expect_equal(length(res), 4) contexts <- sapply(res, '[[', 'context') expect_equal(contexts, rep(c("First context.", "Second context."), each = 2)) last_results <- tail(res, 1)[[1]]$results expect_is(last_results, 'list') expect_equal(length(last_results), 1) fl <- last_results[[1]] expect_false(fl$passed) expect_false(fl$error) expect_equal(fl$success_msg, "TRUE equals FALSE") # test that file is set files <- sapply(res, '[[', 'file') expect_equal(files, rep(c("context.r"), 4)) }) testthat/tests/testthat/test-line_numbers.r0000644000175100001440000000632512602540552020763 0ustar hornikuserscontext("Line Numbers") test_that("line numbers are found and given to reporters", { ## a reporter that keeps its results GreedyReporter <- setRefClass("GreedyReporter", contains = "Reporter", where = environment(), fields = list(results = "list"), methods = list( add_result = function(result) { results[[length(results) + 1]] <<- result } )) # get the results supplied to the reporter by expectations .test_code <- function(code, reporter = GreedyReporter$new(), path = tempfile(fileext = ".R")) { code <- sub("^\\n", '', code) # strip first empty line if any writeLines(code, path) on.exit(unlink(path)) test_file(path, reporter) reporter$results } .test_and_fetch_lines <- function(code) { vapply(.test_code(code), function(x) x$srcref[1], 1L) } ### ==== EDGE CASES ==== # test file with errors, e.g. unknown function expect_error(.test_code('expect_toto()'), regexp = 'Error') # no errors are thrown inside test_that: test_that swallows errors code <- " test_that('simple', { # line1 expect_toto(FALSE) # line2 }) " res1 <- .test_code(code)[[1]] expect_true(res1$error && is.null(res1$srcref)) # unparsable test file expect_error(.test_code( 'bla)(')) # no tests res <- .test_code('1 + 1') expect_true(length(res) == 0) # test_that with no tests res <- .test_code('test_that("void", 1+1)') expect_true(length(res) == 0) # test without a test_that expect_equal(.test_and_fetch_lines('expect_true(FALSE)'), 1) ### ==== NORMAL CASES ==== # simple code <- " context('testing testFile') # line1 test_that('simple', { # line2 expect_true(FALSE) # line3 }) # line4 " expect_equal(.test_and_fetch_lines(code), 3) # in suppressMessages() code <- " test_that('simple', { # line1 suppressMessages(expect_true(FALSE)) # line2 }) " expect_equal(.test_and_fetch_lines(code), 2) # the expect_true is not called code <- " test_that('simple', { # line1 if (1 == 2) expect_true(TRUE) # line2 }) " res <- .test_code(code) expect_true(length(res) == 0) # in a loop code <- " test_that('simple', { # line1 for(i in 1:4) expect_true(TRUE) # line2 }) " expect_equal(.test_and_fetch_lines(code), rep(2,4)) # use case code <- " context('testing testFile') # line1 test_that('simple', { # line2 expect_true(FALSE) # line3 }) # line4 # line5 test_that('more complex', { # line 6 expect_true(TRUE) # line 7 expect_equal('toto', 'titi') # line 8 for(i in 1:4) { # line 9 expect_equal(i%%2,0) # line 10 }}) " expect_equal(.test_and_fetch_lines(code), c(3, 7, 8, 10, 10, 10, 10)) # test when options(keep.source=FALSE) code <- " context('testing testFile') # line1 test_that('simple', { # line2 expect_true(FALSE) # line3 }) # line4 " expect_equal(.test_and_fetch_lines(code), 3) }) testthat/src/0000755000175100001440000000000012607267544012734 5ustar hornikuserstestthat/src/reassign.c0000644000175100001440000000122612607267544014714 0ustar hornikusers#define USE_RINTERNALS #include #include #include SEXP reassign_function(SEXP name, SEXP env, SEXP old_fun, SEXP new_fun) { if (TYPEOF(name) != SYMSXP) error("name must be a symbol"); if (TYPEOF(env) != ENVSXP) error("env must be an environment"); if (TYPEOF(old_fun) != CLOSXP) error("old_fun must be a function"); if (TYPEOF(new_fun) != CLOSXP) error("new_fun must be a function"); SET_FORMALS(old_fun, FORMALS(new_fun)); SET_BODY(old_fun, BODY(new_fun)); SET_CLOENV(old_fun, CLOENV(new_fun)); DUPLICATE_ATTRIB(old_fun, new_fun); return R_NilValue; } SEXP duplicate_(SEXP x) { return duplicate(x); } testthat/NAMESPACE0000644000175100001440000000503312606743734013364 0ustar hornikusers# Generated by roxygen2 (4.1.1): do not edit by hand S3method(as.character,expectation) S3method(as.data.frame,testthat_results) S3method(compare,character) S3method(compare,default) S3method(compare,numeric) S3method(format,expectation) S3method(print,comparison) S3method(print,expectation) S3method(print,testthat_results) export(CheckReporter) export(ListReporter) export(MinimalReporter) export(MultiReporter) export(Reporter) export(RstudioReporter) export(SilentReporter) export(StopReporter) export(SummaryReporter) export(TapReporter) export(TeamcityReporter) export(auto_test) export(auto_test_package) export(compare) export(context) export(describe) export(equals) export(equals_reference) export(evaluate_promise) export(expect_equal) export(expect_equal_to_reference) export(expect_equivalent) export(expect_error) export(expect_false) export(expect_gt) export(expect_gte) export(expect_identical) export(expect_is) export(expect_less_than) export(expect_lt) export(expect_lte) export(expect_match) export(expect_message) export(expect_more_than) export(expect_named) export(expect_null) export(expect_output) export(expect_silent) export(expect_that) export(expect_true) export(expect_warning) export(expectation) export(fail) export(find_test_scripts) export(get_reporter) export(gives_warning) export(has_names) export(is.expectation) export(is_a) export(is_equivalent_to) export(is_false) export(is_identical_to) export(is_less_than) export(is_more_than) export(is_null) export(is_true) export(make_expectation) export(matches) export(not) export(prints_text) export(set_reporter) export(shows_message) export(skip) export(skip_if_not_installed) export(skip_on_appveyor) export(skip_on_cran) export(skip_on_os) export(skip_on_travis) export(source_dir) export(source_test_helpers) export(succeed) export(takes_less_than) export(test_check) export(test_dir) export(test_env) export(test_example) export(test_examples) export(test_file) export(test_package) export(test_that) export(throws_error) export(watch) export(with_mock) export(with_reporter) exportClasses(CheckReporter) exportClasses(ListReporter) exportClasses(MinimalReporter) exportClasses(MultiReporter) exportClasses(Reporter) exportClasses(RstudioReporter) exportClasses(SilentReporter) exportClasses(StopReporter) exportClasses(SummaryReporter) exportClasses(TapReporter) exportClasses(TeamcityReporter) importFrom(crayon,green) importFrom(crayon,red) importFrom(crayon,yellow) importFrom(methods,new) importFrom(methods,setRefClass) useDynLib(testthat,duplicate_) useDynLib(testthat,reassign_function) testthat/R/0000755000175100001440000000000012607267544012346 5ustar hornikuserstestthat/R/expectations.r0000644000175100001440000002253112606743704015236 0ustar hornikusers#' Expectation: does the object inherit from a class? #' #' Tests whether or not an object inherits from any of a list of classes. #' #' @inheritParams expect_that #' @param class character vector of class names #' @seealso \code{\link{inherits}} #' @family expectations #' @export #' @examples #' expect_is(1, "numeric") #' a <- matrix(1:10, nrow = 5) #' expect_is(a, "matrix") #' #' expect_is(mtcars, "data.frame") #' # alternatively for classes that have an is method #' expect_true(is.data.frame(mtcars)) expect_is <- function(object, class, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, is_a(class), info, label) } #' @export #' @rdname oldskool is_a <- function(class) { function(x) { actual_s <- paste0(class(x), collapse = ", ") class_s <- paste(class, collapse = ", ") expectation( inherits(x, class), paste0("inherits from ", actual_s, " not ", class_s), paste0("inherits from ", class_s) ) } } #' Expectation: is the object true/false? #' #' These are fall-back expectations that you can use when none of the other #' more specific expectations apply. The disadvantage is that you may get #' a less informative error message. #' #' Attributes are ignored. #' #' @seealso \code{\link{is_false}} for complement #' @family expectations #' @export #' @examples #' expect_true(2 == 2) #' # Failed expectations will throw an error #' \dontrun{ #' expect_true(2 != 2) #' } #' expect_true(!(2 != 2)) #' # or better: #' expect_false(2 != 2) #' #' a <- 1:3 #' expect_true(length(a) == 3) #' # but better to use more specific expectation, if available #' expect_equal(length(a), 3) expect_true <- function(object, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, is_true(), info, label) } #' @export #' @rdname expect_true #' @inheritParams expect_that expect_false <- function(object, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, is_false(), info, label) } #' @export #' @rdname oldskool is_true <- function() { function(x) { expectation( identical(as.vector(x), TRUE), "isn't true", "is true" ) } } #' @export #' @rdname oldskool is_false <- function() { function(x) { expectation( identical(as.vector(x), FALSE), "isn't false", "is false" ) } } #' Expectation: is the object NULL? #' #' @family expectations #' @inheritParams expect_that #' @export #' @examples #' expect_null(NULL) expect_null <- function(object, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, is_null(), info, label) } #' @export #' @rdname oldskool is_null <- function() { function(x) { expectation( identical(x, NULL), "isn't null", "is null" ) } } #' Expectation: does expression take less than a fixed amount of time to run? #' #' This is useful for performance regression testing. #' #' @family expectations #' @keywords internal #' @export #' @param amount maximum duration in seconds takes_less_than <- function(amount) { function(expr) { duration <- system.time(force(expr))["elapsed"] expectation( duration < amount, paste0("took ", duration, " seconds, which is more than ", amount), paste0("took ", duration, " seconds, which is less than ", amount) ) } } #' Expectation: does object have names? #' #' You can either check for the presence of names (leaving \code{expected} #' blank), specific names (by suppling a vector of names), or absence of #' names (with \code{NULL}). #' #' @inheritParams expect_that #' @param expected Character vector of expected names. Leave missing to #' match any names. Use \code{NULL} to check for absence of names. #' @param ignore.order If \code{TRUE}, sorts names before comparing to #' ignore the effect of order. #' @param ignore.case If \code{TRUE}, lowercases all names to ignore the #' effect of case. #' @param ... Other arguments passed onto \code{has_names}. #' @family expectations #' @export #' @examples #' x <- c(a = 1, b = 2, c = 3) #' expect_named(x) #' expect_named(x, c("a", "b", "c")) #' #' # Use options to control sensitivity #' expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE) #' #' # Can also check for the absence of names with NULL #' z <- 1:4 #' expect_named(z, NULL) expect_named <- function(object, expected, ignore.order = FALSE, ignore.case = FALSE, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, has_names(expected, ignore.order = ignore.order, ignore.case = ignore.case), info = info, label = label) } #' @export #' @rdname oldskool has_names <- function(expected, ignore.order = FALSE, ignore.case = FALSE) { if (missing(expected)) { function(x) { expectation( !identical(names(x), NULL), paste0("does not have names"), paste0("has names") ) } } else { expected <- normalise_names(expected, ignore.order, ignore.case) function(x) { x_names <- normalise_names(names(x), ignore.order, ignore.case) expectation( identical(x_names, expected), paste0("names don't match ", paste0(expected, collapse = ", ")), paste0("names as expected") ) } } } normalise_names <- function(x, ignore.order = FALSE, ignore.case = FALSE) { if (is.null(x)) return() if (ignore.order) x <- sort(x) if (ignore.case) x <- tolower(x) x } #' Expectation: is returned value less or greater than specified value? #' #' This is useful for ensuring returned value is below a ceiling or above #' a floor. #' #' @inheritParams expect_that #' @param expected Expected value #' @param label For full form, label of expected object used in error #' messages. Useful to override default (deparsed expected expression) when #' doing tests in a loop. For short cut form, object label. When #' \code{NULL}, computed from deparsed object. #' @param expected.label Equivalent of \code{label} for shortcut form. #' @param ... other values passed to \code{\link{all.equal}} #' @family expectations #' @examples #' a <- 9 #' expect_less_than(a, 10) #' #' \dontrun{ #' expect_less_than(11, 10) #' } #' #' a <- 11 #' expect_more_than(a, 10) #' \dontrun{ #' expect_more_than(9, 10) #' } #' @name expect-compare NULL #' @export #' @rdname oldskool is_less_than <- function(expected, label = NULL, ...) { if (is.null(label)) { label <- find_expr("expected") } else if (!is.character(label) || length(label) != 1) { label <- deparse(label) } function(actual) { diff <- expected - actual expectation( diff > 0, paste0("not less than ", label, ". Difference: ", format(diff)), paste0("is less than ", label) ) } } #' @export #' @rdname expect-compare expect_less_than <- function(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) { if (is.null(label)) { label <- find_expr("object") } if (is.null(expected.label)) { expected.label <- find_expr("expected") } expect_that(object, is_less_than(expected, label = expected.label, ...), info = info, label = label) } #' @export #' @rdname expect-compare expect_lt <- function(object, expected) { label_object <- find_expr("object") label_expected <- find_expr("expected") diff <- expected - object expect( diff > 0, paste0(label_object, " is not less than ", label_expected, ". Difference: ", format(diff)) ) } #' @export #' @rdname expect-compare expect_lte <- function(object, expected) { label_object <- find_expr("object") label_expected <- find_expr("expected") diff <- expected - object expect( diff >= 0, paste0(label_object, " is strictly more than ", label_expected, ". Difference: ", format(diff)) ) } #' @export #' @rdname oldskool is_more_than <- function(expected, label = NULL, ...) { if (is.null(label)) { label <- find_expr("expected") } else if (!is.character(label) || length(label) != 1) { label <- deparse(label) } function(actual) { diff <- expected - actual expectation( diff < 0, paste0("not more than ", label, ". Difference: ", format(diff)), paste0("is more than ", label) ) } } #' @export #' @rdname expect-compare expect_more_than <- function(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) { if (is.null(label)) { label <- find_expr("object") } if (is.null(expected.label)) { expected.label <- find_expr("expected") } expect_that(object, is_more_than(expected, label = expected.label, ...), info = info, label = label) } #' @export #' @rdname expect-compare expect_gt <- function(object, expected) { label_object <- find_expr("object") label_expected <- find_expr("expected") diff <- expected - object expect( diff < 0, paste0(label_object, " is more than ", label_expected, ". Difference: ", format(diff)) ) } #' @export #' @rdname expect-compare expect_gte <- function(object, expected) { label_object <- find_expr("object") label_expected <- find_expr("expected") diff <- expected - object expect( diff <= 0, paste0(label_object, " is strictly less than ", label_expected, ". Difference: ", format(diff)) ) } testthat/R/reporter-check.R0000644000175100001440000000464612524710715015407 0ustar hornikusers#' @include reporter.r NULL #' Check reporter: 13 line summary of problems #' #' \code{R CMD check} displays only the last 13 lines of the result, so this #' report is design to ensure that you see something useful there. #' #' @export #' @export CheckReporter #' @aliases CheckReporter #' @keywords debugging #' @param ... Arguments used to initialise class CheckReporter <- setRefClass("CheckReporter", contains = "Reporter", fields = list( "failures" = "list", "n_fail" = "integer", "n_ok" = "integer", "n_skip" = "integer" ), methods = list( start_reporter = function() { failures <<- list() n_ok <<- 0L n_skip <<- 0L n_fail <<- 0L }, add_result = function(result) { callSuper(result) if (result$skipped) { n_skip <<- n_skip + 1L return() } if (result$passed) { n_ok <<- n_ok + 1L return() } n_fail <<- n_fail + 1L result$test <- if (is.null(test)) "(unknown)" else test failures[[n_fail]] <<- result cat(failure_summary(result, n_fail), "\n\n", sep = "") }, end_reporter = function() { rule <- paste0(rep("=", getOption("width") - 16), collapse = "") cat("testthat results ", rule, "\n", sep = "") cat( "OK: ", n_ok, " ", "SKIPPED: ", n_skip, " ", "FAILED: ", n_fail, "\n", sep = "" ) if (n_fail == 0) return() if (n_fail > 10) { show <- failures[1:9] } else { show <- failures } fails <- vapply(failures, failure_header, character(1)) if (n_fail > 10) { fails <- c(fails, "...") } labels <- format(paste0(1:length(show), ".")) cat(paste0(labels, " ", fails, collapse = "\n")) cat("\n\n") stop("testthat unit tests failed", call. = FALSE) } ) ) failure_summary <- function(x, label, width = getOption("width")) { header <- paste0(label, ". ", failure_header(x)) linewidth <- ifelse(nchar(header) > width, 0, width - nchar(header)) line <- paste(rep("-", linewidth), collapse = "") paste0( colourise(header, "error"), line, "\n", x$failure_msg ) } failure_header <- function(x) { type <- if (x$error) "Error" else "Failure" ref <- x$srcref if (is.null(ref)) { location <- "" } else { location <- paste0(" (at ", attr(ref, "srcfile")$filename, "#", ref[1], ")") } paste0(type, location, ": ", x$test, " ") } testthat/R/reporter-multi.r0000644000175100001440000000227112247655050015516 0ustar hornikusers#' @include reporter.r NULL #' Multi reporter: combine several reporters in one. #' #' This reporter is useful to use several reporters at the same time, e.g. #' adding a custom reporter without removing the current one. #' #' @export #' @export MultiReporter #' @aliases MultiReporter #' @keywords debugging #' @param ... Arguments used to initialise class MultiReporter <- setRefClass("MultiReporter", contains = "Reporter", fields = list(reporters = 'list'), methods = list( start_reporter = function() { .oapply(reporters, 'start_reporter') }, start_context = function(desc) { .oapply(reporters, 'start_context', desc) }, start_test = function(desc) { .oapply(reporters, 'start_test', desc) }, add_result = function(result) { .oapply(reporters, 'add_result', result) }, end_test = function() { .oapply(reporters, 'end_test') }, end_context = function() { .oapply(reporters, 'end_context') }, end_reporter = function() { .oapply(reporters, 'end_reporter') } ) ) .oapply <- function(objects, method, ...) { for (o in objects) eval(substitute(o$FUN(...), list(FUN = method, ...))) } testthat/R/expectations-old.R0000644000175100001440000000070412453012706015737 0ustar hornikusers#' Old-style expectations. #' #' Initial testthat used a style of testing that looked like #' \code{expect_that(a, equals(b)))} this allowed expectations to read like #' English sentences, but was verbose and a bit too cutesy. This style #' will continue to work but has been soft-deprecated - it is no longer #' documented, and new expectations will only use the new style #' \code{expect_equal(a, b)}. #' #' @name oldskool #' @keywords internal NULL testthat/R/mock.r0000644000175100001440000000730012602555353013453 0ustar hornikusers#' Mock functions in a package. #' #' Executes code after temporarily substituting implementations of package #' functions. This is useful for testing code that relies on functions that are #' slow, have unintended side effects or access resources that may not be #' available when testing. #' #' This works by using some C code to temporarily modify the mocked function \emph{in place}. #' On exit (regular or error), all functions are restored to their previous state. #' This is somewhat abusive of R's internals, and is still experimental, so use with care. #' #' Primitives (such as \code{\link[base]{interactive}}) cannot be mocked, but this can be #' worked around easily by defining a wrapper function with the same name. #' #' @param ... named parameters redefine mocked functions, unnamed parameters #' will be evaluated after mocking the functions #' @param .env the environment in which to patch the functions, #' defaults to the top-level environment. A character is interpreted as #' package name. #' @return The result of the last unnamed parameter #' @references Suraj Gupta (2012): \href{http://obeautifulcode.com/R/How-R-Searches-And-Finds-Stuff}{How R Searches And Finds Stuff} #' @export #' @examples #' with_mock( #' all.equal = function(x, y, ...) TRUE, #' expect_equal(2 * 3, 4), #' .env = "base" #' ) #' with_mock( #' `base::identical` = function(x, y, ...) TRUE, #' `base::all.equal` = function(x, y, ...) TRUE, #' expect_equal(x <- 3 * 3, 6), #' expect_identical(x + 4, 9) #' ) #' throws_error()(expect_equal(3, 5)) #' throws_error()(expect_identical(3, 5)) with_mock <- function(..., .env = topenv()) { new_values <- eval(substitute(alist(...))) mock_qual_names <- names(new_values) if (all(mock_qual_names == "")) { warning("Not mocking anything. Please use named parameters to specify the functions you want to mock.") code_pos <- TRUE } else { code_pos <- (mock_qual_names == "") } code <- new_values[code_pos] mocks <- extract_mocks(new_values = new_values[!code_pos], .env = .env, eval_env = parent.frame()) on.exit(lapply(mocks, reset_mock), add = TRUE) lapply(mocks, set_mock) # Evaluate the code ret <- invisible(NULL) for (expression in code) { ret <- eval(expression, parent.frame()) } ret } pkg_rx <- ".*[^:]" colons_rx <- "::(?:[:]?)" name_rx <- ".*" pkg_and_name_rx <- sprintf("^(?:(%s)%s)?(%s)$", pkg_rx, colons_rx, name_rx) extract_mocks <- function(new_values, .env, eval_env = parent.frame()) { if (is.environment(.env)) .env <- environmentName(.env) mock_qual_names <- names(new_values) lapply( stats::setNames(nm = mock_qual_names), function(qual_name) { pkg_name <- gsub(pkg_and_name_rx, "\\1", qual_name) name <- gsub(pkg_and_name_rx, "\\2", qual_name) if (pkg_name == "") pkg_name <- .env env <- asNamespace(pkg_name) if (!exists(name, envir = env, mode = "function")) stop("Function ", name, " not found in environment ", environmentName(env), ".") mock(name = name, env = env, new = eval(new_values[[qual_name]], eval_env, eval_env)) } ) } #' @useDynLib testthat duplicate_ mock <- function(name, env, new) { target_value <- get(name, envir = env, mode = "function") structure(list( env = env, name = as.name(name), orig_value = .Call(duplicate_, target_value), target_value = target_value, new_value = new), class = "mock") } #' @useDynLib testthat reassign_function set_mock <- function(mock) { .Call(reassign_function, mock$name, mock$env, mock$target_value, mock$new_value) } #' @useDynLib testthat reassign_function reset_mock <- function(mock) { .Call(reassign_function, mock$name, mock$env, mock$target_value, mock$orig_value) } testthat/R/expect-that.r0000644000175100001440000000707512602567175014766 0ustar hornikusers#' Expect that a condition holds. #' #' An old style of testing that's no longer encouraged. #' #' @param object object to test #' @param condition, a function that returns whether or not the condition #' is met, and if not, an error message to display. #' @param label object label. When \code{NULL}, computed from deparsed object. #' @param info extra information to be included in the message (useful when #' writing tests in loops). #' @return the (internal) expectation result as an invisible list #' @keywords internal #' @export #' @seealso \code{\link{fail}} for an expectation that always fails. #' @examples #' expect_that(5 * 2, equals(10)) #' expect_that(sqrt(2) ^ 2, equals(2)) #' \dontrun{ #' expect_that(sqrt(2) ^ 2, is_identical_to(2)) #' } expect_that <- function(object, condition, info = NULL, label = NULL) { stopifnot(length(info) <= 1, length(label) <= 1) label <- label %||% find_expr("object") results <- condition(object) stopifnot(is.expectation(results)) results$srcref <- find_test_srcref() if (!is.null(label)) { results$failure_msg <- paste0(label, " ", results$failure_msg) results$success_msg <- paste0(label, " ", results$success_msg) } if (!is.null(info)) { results$failure_msg <- paste0(results$failure_msg, "\n", info) results$success_msg <- paste0(results$success_msg, "\n", info) } get_reporter()$add_result(results) invisible(results) } # find the srcref of the test call, or NULL find_test_srcref <- function() { # candidate frame is not in the testthat package, # its call matches expect_* and has parsing info attached .is_test_frame <- function(i) { # is enclosure of the frame containing the call inside testthat package ? inside <- identical(environmentName(parent.env(sys.frame(i - 1))) , 'testthat') match_expect <- any(grepl('expect_', sys.call(i))) has_srcref <- !is.null(attr(sys.call(i), 'srcref')) !inside && match_expect && has_srcref } # find the first call (tracing back) that seems good nbe <- Find(.is_test_frame, seq_len(sys.nframe()), right = TRUE) if (length(nbe) == 0 || is.na(nbe)) { return(NULL) } cc <- sys.call(nbe) src <- attr(cc, 'srcref') if (is.null(src)) warning("could not get srcref") src } #' A default expectation that always fails. #' #' The fail function forces a test to fail. This is useful if you want to #' test a pre-condition ' #' #' @param message a string to display. #' @export #' @examples #' \dontrun{ #' test_that("this test fails", fail()) #' } fail <- function(message = "Failure has been forced") { results <- expectation(FALSE, message, "This always succeeds") get_reporter()$add_result(results) invisible() } #' A default expectation that always succeeds. #' #' @param message a string to display. #' @export #' @examples #' \dontrun{ #' test_that("this test fails", fail()) #' } succeed <- function(message = "Success has been forced") { results <- expectation(TRUE, message, "This always fails") get_reporter()$add_result(results) invisible() } #' Negate an expectation #' #' This negates an expectation, making it possible to express that you #' want the opposite of a standard expectation. This function is soft-deprecated #' and will be removed in a future version. #' #' @param f an existing expectation function #' @keywords internal #' @export #' @examples #' x <- 1 #' expect_that(x, equals(1)) #' expect_that(x, not(equals(2))) #' \dontrun{ #' expect_that(x, equals(2)) #' expect_that(x, not(equals(1))) #' } not <- function(f) { stopifnot(is.function(f)) function(...) { res <- f(...) negate(res) } } testthat/R/expectations-matches.R0000644000175100001440000001457312603002262016607 0ustar hornikusers#' Expectation: does string/output/message/warning/error match a regular expression? #' #' @inheritParams expect_that #' @param regexp regular expression to test against. If omitted, #' just asserts that code produces some output, messsage, warning or #' error. Alternatively, you can specify \code{NA} to indicate that #' there should be no output, messages, warnings or errors. #' @param all should all elements of actual value match \code{regexp} (TRUE), #' or does only one need to match (FALSE) #' @param ... Additional arguments passed on to \code{\link{grepl}}, e.g. #' \code{ignore.case} or \code{fixed}. #' @family expectations #' @examples #' expect_match("Testing is fun", "fun") #' expect_match("Testing is fun", "f.n") #' #' # Output -------------------------------------------------------------------- #' str(mtcars) #' expect_output(str(mtcars), "32 obs") #' expect_output(str(mtcars), "11 variables") #' #' # You can use the arguments of grepl to control the matching #' expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE) #' expect_output(str(mtcars), "$ mpg", fixed = TRUE) #' #' # Messages ------------------------------------------------------------------ #' #' f <- function(x) { #' if (x < 0) message("*x* is already negative") #' -x #' } #' expect_message(f(-1)) #' expect_message(f(-1), "already negative") #' expect_message(f(1), NA) #' #' # You can use the arguments of grepl to control the matching #' expect_message(f(-1), "*x*", fixed = TRUE) #' expect_message(f(-1), "NEGATIVE", ignore.case = TRUE) #' #' # Warnings -------------------------------------------------------------------- #' f <- function(x) { #' if (x < 0) warning("*x* is already negative") #' -x #' } #' expect_warning(f(-1)) #' expect_warning(f(-1), "already negative") #' expect_warning(f(1), NA) #' #' # You can use the arguments of grepl to control the matching #' expect_warning(f(-1), "*x*", fixed = TRUE) #' expect_warning(f(-1), "NEGATIVE", ignore.case = TRUE) #' #' #' # Errors -------------------------------------------------------------------- #' f <- function() stop("My error!") #' expect_error(f()) #' expect_error(f(), "My error!") #' #' # You can use the arguments of grepl to control the matching #' expect_error(f(), "my error!", ignore.case = TRUE) #' #' @name matching-expectations NULL #' @export #' @rdname matching-expectations expect_match <- function(object, regexp, ..., all = TRUE, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, matches(regexp, all = all, ...), info = info, label = label) } #' @export #' @rdname oldskool matches <- function(regexp, all = TRUE, ...) { stopifnot(is.character(regexp), length(regexp) == 1) function(char) { matches <- grepl(regexp, char, ...) if (length(char) > 1) { values <- paste0("Actual values:\n", paste0("* ", encodeString(char), collapse = "\n")) } else { values <- paste0("Actual value: \"", encodeString(char), "\"") } expectation( length(matches) > 0 && if (all) all(matches) else any(matches), paste0("does not match '", encodeString(regexp), "'. ", values), paste0("matches '", encodeString(regexp), "'") ) } } #' @export #' @rdname matching-expectations expect_output <- function(object, regexp, ..., info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, prints_text(regexp, ...), info = info, label = label) } #' @export #' @rdname oldskool prints_text <- function(regexp, ...) { function(expr) { output <- evaluate_promise(expr, print = TRUE)$output if (identical(regexp, NA)) { return(expectation( !is.null(output), paste0("produced output: ", encodeString(output)), "didn't produce output" )) } matches(regexp, ...)(output) } } #' @export #' @rdname matching-expectations expect_error <- function(object, regexp = NULL, ..., info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, throws_error(regexp, ...), info = info, label = label) } #' @export #' @rdname oldskool throws_error <- function(regexp = NULL, ...) { function(expr) { res <- try(force(expr), TRUE) no_error <- !inherits(res, "try-error") if (no_error) { return(expectation( identical(regexp, NA), "code raised an error", "code didn't raise an error" )) } if (!is.null(regexp)) { matches(regexp, ...)(res) } else { expectation(TRUE, "no error thrown", "threw an error") } } } #' @export #' @rdname matching-expectations expect_warning <- function(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, gives_warning(regexp, ..., all = all), info = info, label = label) } #' @export #' @rdname oldskool gives_warning <- function(regexp = NULL, all = FALSE, ...) { function(expr) { warnings <- evaluate_promise(expr)$warnings if (identical(regexp, NA)) { expectation( length(warnings) == 0, paste0("expected no warnings:\n", paste("* ", warnings, collapse = "\n")), "no warnings" ) } else if (!is.null(regexp) && length(warnings) > 0) { matches(regexp, all = all, ...)(warnings) } else { expectation( length(warnings) > 0, "no warnings given", paste0(length(warnings), " warnings created") ) } } } #' @export #' @rdname matching-expectations expect_message <- function(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL) { if (is.null(label)) { label <- find_expr("object") } expect_that(object, shows_message(regexp, all = all, ...), info = info, label = label) } #' @export #' @rdname oldskool shows_message <- function(regexp = NULL, all = FALSE, ...) { function(expr) { messages <- evaluate_promise(expr)$messages if (identical(regexp, NA)) { expectation( length(messages) == 0, paste0("expected no message, got:\n", paste("* ", messages, collapse = "\n")), paste0("no messages") ) } else if (!is.null(regexp) && length(messages) > 0) { matches(regexp, all = all, ...)(messages) } else { expectation( length(messages) > 0, "no messages shown", paste0(length(messages), " messages shown") ) } } } testthat/R/make-expectation.r0000644000175100001440000000135412122414275015755 0ustar hornikusers#' Make an equality test. #' #' This a convenience function to make a expectation that checks that #' input stays the same. #' #' @param x a vector of values #' @param expectation the type of equality you want to test for #' (\code{equals}, \code{is_equivalent_to}, \code{is_identical_to}) #' @export #' @examples #' x <- 1:10 #' make_expectation(x) #' #' make_expectation(mtcars$mpg) #' #' df <- data.frame(x = 2) #' make_expectation(df) make_expectation <- function(x, expectation = "equals") { obj <- substitute(x) expectation <- match.arg(expectation, c("equals", "is_equivalent_to", "is_identical_to")) dput(substitute(expect_that(obj, expectation(values)), list(obj = obj, expectation = as.name(expectation), values = x))) } testthat/R/auto-test.r0000644000175100001440000001015112602544235014442 0ustar hornikusers#' Watches code and tests for changes, rerunning tests as appropriate. #' #' The idea behind \code{auto_test} is that you just leave it running while #' you develop your code. Everytime you save a file it will be automatically #' tested and you can easily see if your changes have caused any test #' failures. #' #' The current strategy for rerunning tests is as follows: #' #' \itemize{ #' \item if any code has changed, then those files are reloaded and all tests #' rerun #' \item otherwise, each new or modified test is run #' } #' In the future, \code{auto_test} might implement one of the following more #' intelligent alternatives: #' #' \itemize{ #' \item Use codetools to build up dependency tree and then rerun tests only #' when a dependency changes. #' #' \item Mimic ruby's autotest and rerun only failing tests until they pass, #' and then rerun all tests. #' } # #' @seealso \code{\link{auto_test_package}} #' @export #' @param code_path path to directory containing code #' @param test_path path to directory containing tests #' @param reporter test reporter to use #' @param env environment in which to execute test suite. #' @keywords debugging auto_test <- function(code_path, test_path, reporter = "summary", env = test_env()) { reporter <- find_reporter(reporter) code_path <- normalizePath(code_path) test_path <- normalizePath(test_path) # Start by loading all code and running all tests source_dir(code_path, env = env) test_dir(test_path, env = env, reporter = reporter$copy()) # Next set up watcher to monitor changes watcher <- function(added, deleted, modified) { changed <- normalizePath(c(added, modified)) tests <- changed[starts_with(changed, test_path)] code <- changed[starts_with(changed, code_path)] if (length(code) > 0) { # Reload code and rerun all tests cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n") cat("Rerunning all tests\n") source_dir(code_path, env = env) test_dir(test_path, env = env, reporter = reporter$copy()) } else if (length(tests) > 0) { # If test changes, rerun just that test cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n") test_files(tests, env = env, reporter = reporter$copy()) } TRUE } watch(c(code_path, test_path), watcher) } #' Watches a package for changes, rerunning tests as appropriate. #' #' @param pkg path to package #' @export #' @param reporter test reporter to use #' @keywords debugging #' @seealso \code{\link{auto_test}} for details on how method works auto_test_package <- function(pkg = ".", reporter = "summary") { if (!requireNamespace("devtools", quietly = TRUE)) { stop("devtools required to run auto_test_package(). Please install.", call. = FALSE) } pkg <- devtools::as.package(pkg) reporter <- find_reporter(reporter) code_path <- file.path(pkg$path, "R") test_path <- file.path(pkg$path, "tests", "testthat") # Start by loading all code and running all tests env <- devtools::load_all(pkg)$env devtools::with_envvar( devtools::r_env_vars(), test_dir(test_path, env = env, reporter = reporter$copy()) ) # Next set up watcher to monitor changes watcher <- function(added, deleted, modified) { changed <- normalizePath(c(added, modified)) tests <- changed[starts_with(changed, test_path)] code <- changed[starts_with(changed, code_path)] if (length(code) > 0) { # Reload code and rerun all tests cat("Changed code: ", paste0(basename(code), collapse = ", "), "\n") cat("Rerunning all tests\n") env <<- devtools::load_all(pkg, quiet = TRUE)$env devtools::with_envvar( devtools::r_env_vars(), test_dir(test_path, env = env, reporter = reporter$copy()) ) } else if (length(tests) > 0) { # If test changes, rerun just that test cat("Rerunning tests: ", paste0(basename(tests), collapse = ", "), "\n") devtools::with_envvar( devtools::r_env_vars(), test_files(tests, env = env, reporter = reporter$copy()) ) } TRUE } watch(c(code_path, test_path), watcher) } testthat/R/evaluate-promise.r0000644000175100001440000000355512453004714016006 0ustar hornikusers#' Evaluate a promise, capturing all types of output. #' #' This uses \code{\link[evaluate]{evaluate}} a promise, returning the #' result, test, messages and warnings that the code creates in a list. #' It is used to evaluate code for all test that tests, ensuring that #' (as much as possible) any spurious output is suppressed during the #' testing process. #' #' @param code Code to evaluate. This should be an unevaluated expression. #' @param print If \code{TRUE} and the result of evaluating \code{code} is #' visible this will print the result, ensuring that the output of printing #' the object is included in the overall output #' @export #' @return A list containing #' \item{result}{The result of the function} #' \item{output}{A string containing all the output from the function} #' \item{warnings}{A character vector containing the text from each warning} #' \item{messages}{A character vector containing the text from each message} #' @examples #' evaluate_promise({ #' print("1") #' message("2") #' warning("3") #' 4 #' }) evaluate_promise <- function(code, print = FALSE) { warnings <- character() wHandler <- function(w) { warnings <<- c(warnings, w$message) invokeRestart("muffleWarning") } messages <- character() mHandler <- function(m) { messages <<- c(messages, m$message) invokeRestart("muffleMessage") } temp <- file() on.exit(close(temp)) result <- with_sink(temp, withCallingHandlers( withVisible(code), warning = wHandler, message = mHandler ) ) if (result$visible && print) { with_sink(temp, print(result$value)) } output <- paste0(readLines(temp, warn = FALSE), collapse = "\n") list( result = result$value, output = output, warnings = warnings, messages = messages ) } with_sink <- function(connection, code, ...) { sink(connection, ...) on.exit(sink()) code } testthat/R/colour-text.r0000644000175100001440000000064512522163061015004 0ustar hornikusers# fix the check NOTE:Namespace in Imports field not imported from: 'crayon' #' @importFrom crayon green yellow red testthat_colours <- list( passed = green, skipped = yellow, error = red ) colourise <- function(text, as = c("passed", "skipped", "error")) { colour_config <- getOption("testthat.use_colours", TRUE) if (!isTRUE(colour_config)) return(text) as <- match.arg(as) testthat_colours[[as]](text) } testthat/R/reporter-summary.r0000644000175100001440000000513412606743704016065 0ustar hornikusers#' @include reporter.r NULL #' Test reporter: summary of errors. #' #' This is the most useful reporting reporter as it lets you know both which #' tests have run successfully, as well as fully reporting information about #' failures and errors. It is the default reporting reporter used by #' \code{\link{test_dir}} and \code{\link{test_file}}. #' #' You can use the \code{max_reports} field to control the maximum number #' of detailed reports produced by this reporter. This is useful when running #' with \code{\link{auto_test}} #' #' As an additional benefit, this reporter will praise you from time-to-time #' if all your tests pass. #' #' @export #' @export SummaryReporter #' @aliases SummaryReporter #' @keywords debugging #' @param ... Arguments used to initialise class SummaryReporter <- setRefClass("SummaryReporter", contains = "Reporter", fields = list( "failures" = "list", "n" = "integer", "has_tests" = "logical", "max_reports" = "numeric", "show_praise" = "logical"), methods = list( initialize = function(max_reports = Inf, ...) { max_reports <<- max_reports show_praise <<- TRUE callSuper(...) }, start_context = function(desc) { cat(desc, ": ") }, end_context = function() { cat("\n") }, start_reporter = function() { failures <<- list() has_tests <<- FALSE n <<- 0L }, add_result = function(result) { callSuper(result) has_tests <<- TRUE if (result$skipped) { cat(colourise("S", "skipped")) return() } if (result$passed) { cat(colourise(".", "passed")) return() } if (n + 1 > length(labels) || n + 1 > max_reports) { cat(colourise("F", "error")) } else { n <<- n + 1L result$test <- if (is.null(test)) "(unknown)" else test failures[[n]] <<- result cat(colourise(labels[n], "error")) } }, end_reporter = function() { if (n == 0) { if (!has_tests) return() cat("\n") if (show_praise && runif(1) < 0.1) { cat(colourise(praise(), "passed"), "\n") } else { cat(colourise("DONE", "passed"), "\n") } } else { cat("\n") reports <- vapply(seq_len(n), function(i) { failure_summary(failures[[i]], labels[i]) }, character(1)) cat(paste(reports, collapse = "\n\n"), "\n", sep = "") if (show_praise && runif(1) < 0.25) { cat("\n", colourise(encourage(), "error"), "\n", sep = "") } } } ) ) labels <- c(1:9, letters, LETTERS) testthat/R/expectations-equality.R0000644000175100001440000001540712602540002017013 0ustar hornikusers#' Expectation: is the object equal to a value? #' #' \itemize{ #' \item \code{expect_identical} tests with \code{\link{identical}} #' \item \code{expect_equal} tests with \code{\link{all.equal}} #' \item \code{expect_equivalent} tests with \code{\link{all.equal}} and #' \code{check.attributes = FALSE} #' } # #' @param expected Expected value #' @param label For full form, label of expected object used in error #' messages. Useful to override default (deparsed expected expression) when #' doing tests in a loop. For short cut form, object label. When #' \code{NULL}, computed from deparsed object. #' @param expected.label Equivalent of \code{label} for shortcut form. #' @param ... other values passed to \code{\link{all.equal}} #' @family expectations #' @examples #' a <- 10 #' expect_equal(a, 10) #' #' # Use equals() when testing for numeric equality #' sqrt(2) ^ 2 - 1 #' expect_equal(sqrt(2) ^ 2, 2) #' # Neither of these forms take floating point representation errors into #' # account #' \dontrun{ #' expect_true(sqrt(2) ^ 2 == 2) #' expect_identical(sqrt(2) ^ 2, 2) #' } #' #' # You can pass on additional arguments to all.equal: #' \dontrun{ #' # Test the ABSOLUTE difference is within .002 #' expect_equal(object = 10.01, expected = 10, tolerance = .002, #' scale = 1) #' #' # Test the RELATIVE difference is within .002 #' expectedValue <- 10 #' expect_equal(object = 10.01, expected = expectedValue, tolerance = 0.002, #' scale = expectedValue) #' } #' #' # expect_equivalent ignores attributes #' a <- b <- 1:3 #' names(b) <- letters[1:3] #' expect_equivalent(a, b) #' @name equivalence NULL #' @export #' @rdname oldskool equals <- function(expected, label = NULL, ...) { if (is.null(label)) { label <- find_expr("expected") } else if (!is.character(label) || length(label) != 1) { label <- deparse(label) } function(actual) { same <- compare(actual, expected, ...) expectation( same$equal, paste0("not equal to ", label, "\n", same$message), paste0("equals ", label) ) } } #' @export #' @rdname equivalence #' @inheritParams expect_that expect_equal <- function(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) { if (is.null(label)) { label <- find_expr("object") } if (is.null(expected.label)) { expected.label <- find_expr("expected") } expect_that(object, equals(expected, label = expected.label, ...), info = info, label = label) } #' @export #' @rdname oldskool is_equivalent_to <- function(expected, label = NULL) { if (is.null(label)) { label <- find_expr("expected") } else if (!is.character(label) || length(label) != 1) { label <- deparse(label) } function(actual) { equals(expected, check.attributes = FALSE)(actual) } } #' @export #' @rdname equivalence expect_equivalent <- function(object, expected, info = NULL, label = NULL, expected.label = NULL) { if (is.null(label)) { label <- find_expr("object") } if (is.null(expected.label)) { expected.label <- find_expr("expected") } expect_that(object, is_equivalent_to(expected, label = expected.label), info = info, label = label) } #' @export #' @rdname oldskool is_identical_to <- function(expected, label = NULL) { if (is.null(label)) { label <- find_expr("expected") } else if (!is.character(label) || length(label) != 1) { label <- deparse(label) } function(actual) { if (identical(actual, expected)) { diff <- "" } else { same <- all.equal(expected, actual) if (isTRUE(same)) { diff <- "Objects equal but not identical" } else { diff <- paste0(same, collapse = "\n") } } expectation( identical(actual, expected), paste0("is not identical to ", label, ". Differences: \n", diff), paste0("is identical to ", label) ) } } #' @export #' @rdname equivalence expect_identical <- function(object, expected, info = NULL, label = NULL, expected.label = NULL) { if (is.null(label)) { label <- find_expr("object") } if (is.null(expected.label)) { expected.label <- find_expr("expected") } expect_that(object, is_identical_to(expected, label = expected.label), info = info, label = label) } #' Expectation: is the object equal to a reference value stored in a file? #' #' This expectation is equivalent to \code{\link{expect_equal}}, except that the #' expected value is stored in an RDS file instead of being specified literally. #' This can be helpful when the value is necessarily complex. If the file does #' not exist then it will be created using the value of the specified object, #' and subsequent tests will check for consistency against that generated value. #' The test can be reset by deleting the RDS file. #' #' It is important to initialize the reference RDS file within the source #' package, most likely in the \code{tests/testthat/} directory. Testing spawned #' by \code{devtools::test()}, for example, will accomplish this. But note that #' testing spawned by \code{R CMD check} and \code{devtools::check()} will NOT. #' In the latter cases, the package source is copied to an external location #' before tests are run. The resulting RDS file will not make its way back into #' the package source and will not be available for subsequent comparisons. #' #' @inheritParams expect_that #' @param file The file name used to store the object. Should have an "rds" #' extension. #' @param label For the full form, a label for the expected object, which is #' used in error messages. Useful to override the default (which is based on #' the file name), when doing tests in a loop. For the short-cut form, the #' object label, which is computed from the deparsed object by default. #' @param expected.label Equivalent of \code{label} for shortcut form. #' @param ... other values passed to \code{\link{expect_equal}} #' @family expectations #' @export #' @examples #' \dontrun{ #' expect_equal_to_reference(1, "one.rds") #' } expect_equal_to_reference <- function(object, file, ..., info = NULL, label = NULL, expected.label = NULL) { if (is.null(label)) { label <- find_expr("object") } if (is.null(expected.label)) { expected.label <- paste("reference from", file) } expect_that(object, equals_reference(file, label = expected.label, ...), info = info, label = label) } #' @export #' @rdname oldskool equals_reference <- function(file, label = NULL, ...) { if (file.exists(file)) { reference <- readRDS(file) if (is.null(label)) { label <- paste("reference from", file) } equals(reference, label = label, ...) } else { function(actual) { # saveRDS() returns no useful information to use for the expectation saveRDS(actual, file) expectation(TRUE, "should never fail", "saved to file") } } } testthat/R/reporter-minimal.r0000644000175100001440000000175112453255332016012 0ustar hornikusers#' @include reporter.r NULL #' Test reporter: minimal. #' #' The minimal test reporter provides the absolutely minimum amount of #' information: whether each expectation has succeeded, failed or experienced #' an error. If you want to find out what the failures and errors actually #' were, you'll need to run a more informative test reporter. #' #' @export #' @keywords debugging #' @export MinimalReporter #' @aliases MinimalReporter #' @param ... Arguments used to initialise class MinimalReporter <- setRefClass("MinimalReporter", contains = "Reporter", methods = list( add_result = function(result) { callSuper(result) if (result$skipped) { cat(colourise("S", "skipped")) } else if (result$passed) { cat(colourise(".", "passed")) } else { if (result$error) { cat(colourise("F", "error")) } else { cat(colourise("E", "error")) } } }, end_reporter = function() { cat("\n") } ) ) testthat/R/reporter-list.r0000644000175100001440000000326412453747623015351 0ustar hornikusers#' @include reporter.r NULL setOldClass('proc_time') #' List reporter: gather all test results along with elapsed time and #' file information. #' #' This reporter gathers all results, adding additional information such as #' test elapsed time, and test filename if available. Very useful for reporting. #' #' @export #' @export ListReporter #' @aliases ListReporter #' @keywords debugging #' @param ... Arguments used to initialise class ListReporter <- setRefClass("ListReporter", contains = "Reporter", fields = list( start_test_time = 'proc_time', file = 'character', results = 'list', current_test_results = 'list'), methods = list( ### overriden methods from Reporter start_reporter = function(...) { callSuper(...) results <<- list() current_test_results <<- list() }, start_test = function(desc) { callSuper(desc) current_test_results <<- list() start_test_time <<- proc.time() }, end_test = function() { el <- as.double(proc.time() - start_test_time) fname <- if (length(file)) file else '' test_info <- list(file = fname, context = context, test = test, user = el[1], system = el[2], real = el[3], results = current_test_results) results <<- c(results, list(test_info)) current_test_results <<- list() callSuper() # at the end because it resets the test name }, add_result = function(result) { callSuper(result) current_test_results <<- c(current_test_results, list(result)) }, ### new methods start_file = function(name) { file <<- name }, get_summary = function() { summarize_results(results) } ) ) testthat/R/traceback.r0000644000175100001440000000166112524711050014434 0ustar hornikuserscreate_traceback <- function(callstack) { if (length(callstack) == 0) return() max_lines <- getOption("deparse.max.lines", Inf) # Convert to text calls <- lapply(callstack, deparse, width = getOption("width")) if (is.finite(max_lines)) { calls <- lapply(calls, function(x) x[seq_len(min(length(x), max_lines))]) } calls <- vapply(calls, paste0, collapse = "\n", FUN.VALUE = character(1)) # Extract srcrefs srcrefs <- lapply(callstack, attr, "srcref") has_ref <- vapply(srcrefs, function(x) inherits(x, "srcref"), logical(1)) files <- vapply(srcrefs[has_ref], function(x) attr(x, "srcfile")$filename, FUN.VALUE = character(1)) lines <- vapply(srcrefs[has_ref], function(x) as.vector(x)[1], FUN.VALUE = integer(1)) calls[has_ref] <- paste0(calls[has_ref], " at ", files, ":", lines) # Number and indent calls <- paste0(seq_along(calls), ": ", calls) calls <- gsub("\n", "\n ", calls) calls } testthat/R/test-that.r0000644000175100001440000000614112602555210014431 0ustar hornikusers#' Create a test. #' #' A test encapsulates a series of expectations about small, self-contained #' set of functionality. Each test is contained in a \link{context} and #' contains multiple expectation generated by \code{\link{equivalence}}. #' #' Tests are evaluated in their own environments, and should not affect #' global state. #' #' When run from the command line, tests return \code{NULL} if all #' expectations are met, otherwise it raises an error. #' #' @param desc test name. Names should be kept as brief as possible, as they #' are often used as line prefixes. #' @param code test code containing expectations #' @export #' @examples #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1 / sqrt(2)) #' expect_equal(cos(pi / 4), 1 / sqrt(2)) #' expect_equal(tan(pi / 4), 1) #' }) #' # Failing test: #' \dontrun{ #' test_that("trigonometric functions match identities", { #' expect_equal(sin(pi / 4), 1) #' }) #' } test_that <- function(desc, code) { test_code(desc, substitute(code), env = parent.frame()) invisible() } # Executes a test. # # @keywords internal # @param description the test name # @param code the code to be tested, needs to be an unevaluated expression # i.e. wrap it in substitute() # @param env the parent environment of the environment the test code runs in test_code <- function(description, code, env) { new_test_environment <- new.env(parent = env) get_reporter()$start_test(description) on.exit(get_reporter()$end_test()) capture_calls <- function(e) { # Capture call stack, removing last two calls from end (added by # withCallingHandlers), and first frame + 7 calls from start (added by # tryCatch etc) e$calls <- utils::head(sys.calls()[-seq_len(frame + 7)], -2) signalCondition(e) } frame <- sys.nframe() ok <- TRUE tryCatch( withCallingHandlers( eval(code, new_test_environment), error = capture_calls, message = function(c) invokeRestart("muffleMessage") ), error = function(e) { ok <- FALSE report <- expectation_error(e$message, e$calls) get_reporter()$add_result(report) }, skip = function(e) { report <- expectation_skipped(e$message) get_reporter()$add_result(report) } ) invisible(ok) } #' R package to make testing fun! #' #' Try the example below. Have a look at the references and learn more #' from function documentation such as \code{\link{expect_that}}. #' #' @details Software testing is important, but, in part because #' it is frustrating and boring, many of us avoid it. #' #' testthat is a new testing framework for R that is easy learn and use, #' and integrates with your existing workflow. #' #' @docType package #' @name testthat #' @references Wickham, H (2011). testthat: Get Started with Testing. #' \strong{The R Journal} \emph{3/1} 5-10. #' \url{http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf} #' #' \url{https://github.com/hadley/testthat} #' #' \url{http://adv-r.had.co.nz/Testing.html} #' #' @examples #' library(testthat) #' a <- 9 #' expect_that(a, is_less_than(10)) #' expect_less_than(a, 10) NULL testthat/R/reporter-stop.r0000644000175100001440000000317512601000223015331 0ustar hornikusers#' @include reporter.r NULL #' Test reporter: stop on error. #' #' The default reporter, executed when \code{expect_that} is run #' interactively. It #' responds by \link{stop}()ing on failures and doing nothing otherwise. This #' will ensure that a failing test will raise an error. #' #' This should be used when doing a quick and dirty test, or during the final #' automated testing of R CMD check. Otherwise, use a reporter that runs all #' tests and gives you more context about the problem. #' #' @export #' @export StopReporter #' @aliases StopReporter #' @keywords debugging #' @param ... Arguments used to initialise class StopReporter <- setRefClass("StopReporter", contains = "Reporter", fields = c("failures"), methods = list( initialize = function(...) { failures <<- list() callSuper(...) }, start_test = function(desc) { test <<- desc }, end_test = function() { cur_test <- test test <<- NULL if (length(failures) == 0) return() messages <- vapply(failures, as.character, character(1)) if (length(messages) > 1) { messages <- paste0("* ", messages, collapse = "\n") } failures <<- list() msg <- paste0("Test failed: '", cur_test, "'\n", messages) stop(msg, call. = FALSE) }, add_result = function(result) { callSuper(result) if (result$passed) return() if (result$skipped) return() # If running in test suite, store, otherwise raise immediately. if (is.null(test)) { stop(result$failure_msg, call. = FALSE) } else { failures <<- c(failures, list(result)) } } ) ) testthat/R/test-package.r0000644000175100001440000000506612602555176015104 0ustar hornikuserstest_pkg_env <- function(package) { env <- new.env(parent = getNamespace(package)) # Supress warning messages from S4 env$.packageName <- package env } with_top_env <- function(env, code) { old <- options(topLevelEnvironment = env) on.exit(options(old), add = TRUE) code } #' Run all tests in an installed package. #' #' Test are run in an environment that inherits from the package's namespace #' environment, so that tests can access non-exported functions and variables. #' Tests should be placed in \code{tests/testthat}. Use \code{test_check} with #' \code{R CMD check} and \code{test_pacakge} interactively at the console. #' #' @section R CMD check: #' Create \code{tests/testthat.R} that contains: #' #' \preformatted{ #' library(testthat) #' library(yourpackage) #' #' test_check("yourpackage") #' } #' #' @param package package name #' @inheritParams test_dir #' @return the results as a "testthat_results" (list) #' @export #' @examples #' \dontrun{test_package("testthat")} test_package <- function(package, filter = NULL, reporter = "summary", ...) { # Ensure that test package returns silently if called recursively - this # will occur if test-all.R ends up in the same directory as all the other # tests. if (env_test$in_test) return(invisible()) env_test$in_test <- TRUE on.exit(env_test$in_test <- FALSE) test_path <- system.file("tests", package = package) if (test_path == "") stop("No tests found for ", package, call. = FALSE) # If testthat subdir exists, use that test_path2 <- file.path(test_path, "testthat") if (file.exists(test_path2)) { test_path <- test_path2 } else { warning("Placing tests in `inst/tests/` is deprecated. ", "Please use `tests/testthat/` instead", call. = FALSE) } run_tests(package, test_path, filter, reporter, ...) } run_tests <- function(package, test_path, filter, reporter, ...) { reporter <- find_reporter(reporter) env <- test_pkg_env(package) res <- with_top_env(env, { test_dir(test_path, reporter = reporter, env = env, filter = filter, ...) }) if (!all_passed(res)) { stop("Test failures", call. = FALSE) } invisible(res) } #' @inheritParams test_package #' @export #' @rdname test_package test_check <- function(package, filter = NULL, reporter = "check", ...) { require(package, character.only = TRUE) test_path <- "testthat" if (!utils::file_test('-d', test_path)) { stop("No tests found for ", package, call. = FALSE) } run_tests(package, test_path, filter, reporter, ...) } env_test <- new.env(parent = emptyenv()) env_test$in_test <- FALSE testthat/R/utils.r0000644000175100001440000000556612602565600013672 0ustar hornikusers`%||%` <- function(a, b) if (is.null(a)) b else a # Find expression that created a variable find_expr <- function(name, env = parent.frame()) { subs <- do.call("substitute", list(as.name(name), env)) paste0(deparse(subs, width.cutoff = 500), collapse = "\n") } # A version of grepl that's vectorised along pattern, not x grepl2 <- function(pattern, x, ...) { stopifnot(length(x) == 1) vapply(pattern, grepl, x, ..., FUN.VALUE = logical(1), USE.NAMES = FALSE) } starts_with <- function(string, prefix) { substr(string, 1, nchar(prefix)) == prefix } is_directory <- function(x) file.info(x)$isdir is_readable <- function(x) file.access(x, 4) == 0 is.error <- function(x) inherits(x, "error") is.skip <- function(x) inherits(x, "skip") #' Skip a test. #' #' This function allows you to skip a test if it's not currently available. #' This will produce an informative message, but will not cause the test #' suite to fail. #' #' @section Helpers: #' \code{skip_on_cran()} skips tests on CRAN, using the \code{NOT_CRAN} #' environment variable set by devtools. #' #' \code{skip_on_travis()} skips tests on travis by inspecting the #' \code{TRAVIS} environment variable. #' #' \code{skip_on_appveyor()} skips tests on appveyor by inspecting the #' \code{APPVEYOR} environment variable. #' #' \code{skip_if_not_installed()} skips a tests if a package is not installed #' (useful for suggested packages). #' #' @param message A message describing why the test was skipped. #' @export #' @examples #' if (FALSE) skip("No internet connection") skip <- function(message) { cond <- structure(list(message = message), class = c("skip", "condition")) stop(cond) } #' @export #' @param pkg Name of package to check for #' @rdname skip skip_if_not_installed <- function(pkg) { if (requireNamespace(pkg, quietly = TRUE)) return(invisible(TRUE)) skip(paste0(pkg, " not installed")) } #' @export #' @rdname skip skip_on_cran <- function() { if (identical(Sys.getenv("NOT_CRAN"), "true")) return(invisible(TRUE)) skip("On CRAN") } #' @export #' @param os Character vector of system names. Supported values are #' \code{"windows"}, \code{"mac"}, \code{"linux"} and \code{"solaris"}. #' @rdname skip skip_on_os <- function(os) { os <- match.arg(os, c("windows", "mac", "linux", "solaris"), several.ok = TRUE) sysname <- tolower(Sys.info()[["sysname"]]) switch(sysname, windows = if ("windows" %in% os) skip("On windows"), darwin = if ("mac" %in% os) skip("On Mac"), linux = if ("linux" %in% os) skip("On Linux"), sunos = if ("solaris" %in% os) skip("On Solaris") ) invisible(TRUE) } #' @export #' @rdname skip skip_on_travis <- function() { if (!identical(Sys.getenv("TRAVIS"), "true")) return(invisible(TRUE)) skip("On Travis") } #' @export #' @rdname skip skip_on_appveyor <- function() { if (!identical(Sys.getenv("APPVEYOR"), "True")) return() skip("On Appveyor") } testthat/R/expectations-silent.R0000644000175100001440000000146612606743704016476 0ustar hornikusers#' Expect that code has no output, messages, or warnings. #' #' @param expr Expression to evaluate #' @export #' @family expectations #' @examples #' expect_silent("123") #' #' f <- function() { #' message("Hi!") #' warning("Hey!!") #' print("OY!!!") #' } #' \dontrun{ #' expect_silent(f()) #' } expect_silent <- function(expr) { label <- find_expr("expr") out <- evaluate_promise(expr) outputs <- c( if (!identical(out$output, "")) "output", if (length(out$warnings) > 0) "warnings", if (length(out$messages) > 0) "messages" ) expect( length(outputs) == 0, paste0(label, " produced ", paste(outputs, collapse = ", ")) ) } expect <- function(passed, message) { exp <- expectation(passed, message, srcref = find_test_srcref()) get_reporter()$add_result(exp) invisible(exp) } testthat/R/expectation.r0000644000175100001440000000451012602567710015045 0ustar hornikusers#' Expectation class. #' #' Any expectation should return objects of this class - see the built in #' expectations for details. #' #' @param passed a single logical value indicating whether the test passed #' (\code{TRUE}), failed (\code{FALSE}), or threw an error (\code{NA}) #' @param failure_msg A text description of failure #' @param success_msg A text description of success #' @param srcref Source reference, if known #' @keywords internal #' @export expectation <- function(passed, failure_msg, success_msg = "unknown", srcref = NULL) { structure( list( passed = passed, error = FALSE, skipped = FALSE, failure_msg = failure_msg, success_msg = success_msg, srcref = srcref ), class = "expectation" ) } expectation_error <- function(error, calls) { msg <- gsub("Error.*?: ", "", as.character(error)) if (length(calls) > 0) { traceback <- create_traceback(calls) user_calls <- paste0(traceback, collapse = "\n") msg <- paste0(msg, "\n", user_calls) } else { # Need to remove trailing newline from error message to be consistent # with other messages msg <- gsub("\n$", "", msg) } structure( list( passed = FALSE, error = TRUE, skipped = FALSE, failure_msg = msg, success_msg = "no error occured" ), class = "expectation" ) } expectation_skipped <- function(error) { msg <- gsub("Error.*?: ", "", as.character(error)) structure( list( passed = FALSE, error = FALSE, skipped = TRUE, failure_msg = msg, success_msg = "not skipped" ), class = "expectation" ) } #' @export #' @rdname expectation #' @param x object to test for class membership is.expectation <- function(x) inherits(x, "expectation") #' @export print.expectation <- function(x, ...) cat(format(x), "\n") #' @export format.expectation <- function(x, ...) { if (x$passed) { paste0("As expected: ", x$success_msg) } else { paste0("Not expected: ", x$failure_msg, ".") } } #' @export as.character.expectation <- function(x, ...) format(x) negate <- function(expt) { stopifnot(is.expectation(expt)) # If it's an error, don't need to do anything if (expt$error) return(expt) opp <- expt opp$passed <- !expt$passed opp$failure_msg <- expt$success_msg opp$success_msg <- expt$failure_msg opp } testthat/R/reporter.r0000644000175100001440000000200312602556331014354 0ustar hornikusers#' Stub object for managing a reporter of tests. #' #' Do not clone directly from this object - children should implement all #' methods. #' #' @keywords internal. #' @export #' @export Reporter #' @aliases Reporter #' @param ... Arguments used to initialise class #' @importFrom methods setRefClass new Reporter <- setRefClass("Reporter", fields = list( context = "character", test = "ANY", failed = "logical", context_open = "logical" ), methods = list( initialize = function(...) { context_open <<- FALSE failed <<- FALSE test <<- NULL initFields(...) }, start_reporter = function() { }, start_context = function(desc) { context <<- desc }, start_test = function(desc) { test <<- desc }, add_result = function(result) { if (!result$passed && !result$skipped) { failed <<- TRUE } }, end_test = function() { test <<- "" }, end_context = function() {}, end_reporter = function() {} ) ) testthat/R/test-results.r0000644000175100001440000000366112601070363015176 0ustar hornikusers# format results from ListReporter summarize_results <- function(results_list) { if (is.null(results_list) || length(results_list) == 0) return(data.frame()) rows <- lapply(results_list, sumarize_one_test_results) do.call(rbind, rows) } sumarize_one_test_results <- function(test) { test_results <- test$results nb_tests <- length(test_results) nb_failed <- nb_skipped <- 0L error <- FALSE if (nb_tests > 0) { # error reports should be handled differently. # They may not correspond to an expect_that() test so remove them last_test <- test_results[[nb_tests]] error <- last_test$error if (error) { test_results <- test_results[- nb_tests] nb_tests <- length(test_results) } nb_passed <- sum(vapply(test_results, '[[', TRUE, 'passed')) nb_skipped <- sum(vapply(test_results, '[[', TRUE, 'skipped')) nb_failed <- nb_tests - nb_passed - nb_skipped } context <- if (length(test$context) > 0) test$context else '' res <- data.frame(file = test$file, context = context, test = test$test, nb = nb_tests, failed = nb_failed, skipped = as.logical(nb_skipped), error = error, user = test$user, system = test$system, real = test$real, stringsAsFactors = FALSE) res } #' Create a `testthat_results` object from the test results #' as stored in the ListReporter results field. #' #' @param results a list as stored in ListReporter #' @return its list argument as a `testthat_results` object #' @seealso ListReporter testthat_results <- function(results) { structure(results, class = "testthat_results") } # return if all tests are successful w/o error all_passed <- function(res) { df <- as.data.frame.testthat_results(res) if (ncol(df) == 0) return(TRUE) sum(df$failed) == 0 && all(!df$error) } #' @export as.data.frame.testthat_results <- function(x, ...) { summarize_results(x) } #' @export print.testthat_results <- function(x, ...) { print(as.data.frame(x)) } testthat/R/test-example.R0000644000175100001440000000162112601000547015057 0ustar hornikusers#' Test package examples #' #' These helper functions make it easier to test the examples in a package. #' Each example counts as one test, and it succeeds if the code runs without #' an error. #' #' @param path For \code{test_examples}, path to directory containing Rd files. #' For \code{test_example}, path to a single Rd file. Remember the working #' directory for tests is \code{tests/testthat}. #' @export test_examples <- function(path = "../../man") { man <- dir(path, "\\.Rd$", full.names = TRUE) lapply(man, test_example) } #' @export #' @rdname test_examples test_example <- function(path) { ex_path <- file.path(tempdir(), paste0(tools::file_path_sans_ext(basename(path)), ".R")) tools::Rd2ex(path, ex_path) if (!file.exists(ex_path)) return() env <- new.env(parent = globalenv()) ok <- test_code(path, parse(ex_path), env = globalenv()) if (ok) succeed(path) invisible() } testthat/R/reporter-tap.r0000644000175100001440000000334412602555543015153 0ustar hornikusers#' @include reporter.r NULL #' Test reporter: TAP format. #' #' This reporter will output results in the Test Anything Protocol (TAP), #' a simple text-based interface between testing modules in a test harness. #' For more information about TAP, see http://testanything.org #' #' @export #' @export TapReporter #' @aliases TapReporter #' @keywords debugging #' @param ... Arguments used to initialise class TapReporter <- setRefClass("TapReporter", contains = "Reporter", fields = list( "results" = "list", "n" = "integer", "has_tests" = "logical", "contexts" = "character"), methods = list( start_context = function(desc) { contexts[n+1] <<- desc; }, start_reporter = function() { results <<- list() n <<- 0L has_tests <<- FALSE contexts <<- NA_character_ }, add_result = function(result) { callSuper(result) has_tests <<- TRUE n <<- n + 1L; result$test <- if (is.null(test)) "(unknown)" else test results[[n]] <<- result }, end_reporter = function() { if(has_tests) { cat("1..", n, '\n', sep=''); for(i in 1:n) { if (! is.na(contexts[i])) { cat("# Context", contexts[i], "\n") } result <- results[[i]]; if (result$passed) { cat('ok', i, result$test, '\n') } else if (result$skipped) { cat('ok', i, '# SKIP', result$failure_msg, '\n') } else { cat('not ok', i, result$test, '\n') msg <- gsub('\n', '\n ', result$failure_msg) cat(' ', msg, '\n') } } } } ) ) testthat/R/test-files.r0000644000175100001440000001253712602756050014606 0ustar hornikusers#' Generate default testing environment. #' #' We use a new environment which inherits from \code{\link{globalenv}}. #' In an ideal world, we'd avoid putting the global environment on the #' search path for tests, but it's not currently possible without losing #' the ability to load packages in tests. #' #' @keywords internal #' @export test_env <- function() { new.env(parent = globalenv()) } #' Run all of the tests in a directory. #' #' Test files start with \code{test} and are executed in alphabetical order #' (but they shouldn't have dependencies). Helper files start with #' \code{helper} and loaded before any tests are run. #' #' @param path path to tests #' @param reporter reporter to use #' @param filter If not \code{NULL}, only tests with file names matching this #' regular expression will be executed. Matching will take on the file #' name after it has been stripped of \code{"test-"} and \code{".r"}. #' @param env environment in which to execute test suite. #' @param ... Additional arguments passed to \code{grepl} to control filtering. #' #' @return the results as a "testthat_results" (list) #' @export test_dir <- function(path, filter = NULL, reporter = "summary", env = test_env(), ...) { source_test_helpers(path, env) paths <- find_test_scripts(path, filter, ...) test_files(paths, reporter = reporter, env = env, ...) } test_files <- function(paths, reporter = "summary", env = test_env(), ...) { if (length(paths) == 0) stop('No matching test file in dir') current_reporter <- find_reporter(reporter) current_reporter$start_reporter() results <- lapply(paths, test_file, env = env, reporter = current_reporter, start_end_reporter = FALSE) current_reporter$end_reporter() results <- unlist(results, recursive = FALSE) invisible(testthat_results(results)) } #' Source the helper scripts if any. #' #' Helper scripts are R scripts accompanying test scripts #' but prefixed by \code{helper}. These scripts are sourced #' only one time in test environment. #' #' @inheritParams test_dir #' @keywords internal #' @export source_test_helpers <- function(path, env = globalenv()) { source_dir(path, "^helper.*\\.[rR]$", env = env) } #' Find the test files. #' @param path path to tests #' @param filter cf \code{\link{test_dir}} #' @param invert If \sQuote{TRUE} return files which do \emph{not} match. #' @param ... Additional arguments passed to \code{grepl} to control filtering. #' @return the test file paths #' @keywords internal #' @export find_test_scripts <- function(path, filter = NULL, invert = FALSE, ...) { files <- dir(path, "^test.*\\.[rR]$", full.names = TRUE) if (!is.null(filter)) { test_names <- basename(files) test_names <- gsub("^test-?", "", test_names) test_names <- gsub("\\.[rR]", "", test_names) which_files <- grepl(filter, test_names, ...) if (isTRUE(invert)) { which_files <- !which_files } files <- files[which_files] } files } #' Take care or finding the test files and sourcing the helpers. #' @inheritParams test_dir #' @param env environment in which to source the helpers #' @return the test file paths setup_test_dir <- function(path, filter, env) { source_dir(path, "^helper.*\\.[rR]$", env = env) find_test_scripts(path, filter) } #' Load all source files in a directory. #' #' The expectation is that the files can be sourced in alphabetical order. #' #' @param path path to tests #' @param pattern regular expression used to filter files #' @param env environment in which to store results #' @param chdir change working directory to path? #' @keywords internal #' @export source_dir <- function(path, pattern = "\\.[rR]$", env = test_env(), chdir = TRUE) { files <- normalizePath(sort(dir(path, pattern, full.names = TRUE))) if (chdir) { old <- setwd(path) on.exit(setwd(old)) } lapply(files, sys.source2, envir = env) } #' Run all tests in specified file. #' #' @param path path to file #' @param reporter reporter to use #' @param env environment in which to execute the tests #' @param start_end_reporter whether to start and end the reporter #' @return the results as a "testthat_results" (list) #' @export test_file <- function(path, reporter = "summary", env = test_env(), start_end_reporter = TRUE) { reporter <- find_reporter(reporter) if (is.null(env)) env <- globalenv() lister <- ListReporter$new() if (!is.null(reporter)) { reporter <- MultiReporter$new(reporters = list(reporter, lister)) } else { reporter <- lister } old_reporter <- set_reporter(reporter) old_dir <- setwd(dirname(path)) on.exit({ setwd(old_dir) set_reporter(old_reporter) }, add = TRUE) if (start_end_reporter) reporter$start_reporter() fname <- basename(path) lister$start_file(fname) sys.source2(fname, new.env(parent = env)) end_context() if (start_end_reporter) reporter$end_reporter() invisible(testthat_results(lister$results)) } sys.source2 <- function(file, envir = parent.frame()) { stopifnot(file.exists(file)) stopifnot(is.environment(envir)) lines <- readLines(file, warn = FALSE) srcfile <- srcfilecopy(file, lines, file.info(file)[1, "mtime"], isFile = TRUE) exprs <- parse(text = lines, n = -1, srcfile = srcfile) n <- length(exprs) if (n == 0L) return(invisible()) invisible(eval(exprs, envir)) } testthat/R/reporter-rstudio.R0000644000175100001440000000206112460511220015775 0ustar hornikusers#' @include reporter.r NULL #' Test reporter: RStudio #' #' This reporter is designed for output to RStudio. It produces results in #' any easily parsed form. #' #' @export #' @export RstudioReporter #' @aliases RstudioReporter #' @keywords debugging #' @param ... Arguments used to initialise class RstudioReporter <- setRefClass("RstudioReporter", contains = "Reporter", fields = list(), methods = list( add_result = function(result) { callSuper(result) if (result$passed) return() if (result$skipped) { status <- "info" prefix <- "Skipped" } else if (result$error) { status <- "error" prefix <- "Failed" } else { status <- "error" prefix <- "Errored" } ref <- result$srcref if (is.null(ref)) { location <- "?#?:?" } else { location <- paste0(attr(ref, "srcfile")$filename, "#", ref[1], ":1") } cat(location, " [", status, "] ", test, ". ", strsplit(result$failure_msg, "\n")[[1]][1], "\n", sep = "") } ) ) testthat/R/reporter-silent.r0000644000175100001440000000151112453255332015654 0ustar hornikusers#' @include reporter.r NULL #' Test reporter: gather all errors silently. #' #' This reporter quietly runs all tests, simply gathering the results #' for later use. This is helpful for programmatically inspecting errors #' after a test run. #' #' @export #' @export SilentReporter #' @aliases SilentReporter #' @keywords debugging #' @param ... Arguments used to initialise class SilentReporter <- setRefClass("SilentReporter", contains = "Reporter", fields = c("failures"), methods = list( initialize = function(...) { failures <<- list() callSuper(...) }, start_test = function(desc) { test <<- desc }, end_test = function() { test <<- NULL }, add_result = function(result) { callSuper(result) if (result$passed) return() failures[[test]] <<- result } ) ) testthat/R/describe.r0000644000175100001440000000554512325225031014300 0ustar hornikusers#' describe: a BDD testing language #' #' A simple BDD DSL for writing tests. The language is similiar to RSpec for #' Ruby or Mocha for JavaScript. BDD tests read like sentences and it should #' thus be easier to understand what the specification of a function/component #' is. #' #' Tests using the \code{describe} syntax not only verify the tested code, but #' also document its intended behaviour. Each \code{describe} block specifies a #' larger component or function and contains a set of specifications. A #' specification is definied by an \code{it} block. Each \code{it} block #' functions as a test and is evaluated in its own environment. You #' can also have nested \code{describe} blocks. #' #' #' This test syntax helps to test the intented behaviour of your code. For #' example: you want to write a new function for your package. Try to describe #' the specification first using \code{describe}, before your write any code. #' After that, you start to implement the tests for each specification (i.e. #' the \code{it} block). #' #' Use \code{describe} to verify that you implement the right things and use #' \code{\link{test_that}} to ensure you do the things right. #' #' @param description description of the feature #' @param code test code containing the specs #' @export #' @examples #' describe("matrix()", { #' it("can be multiplied by a scalar", { #' m1 <- matrix(1:4, 2, 2) #' m2 <- m1 * 2 #' expect_equivalent(matrix(1:4 * 2, 2, 2), m2) #' }) #' it("can have not yet tested specs") #' }) #' #' # Nested specs: #' ## code #' addition <- function(a, b) a + b #' division <- function(a, b) a / b #' #' ## specs #' describe("math library", { #' describe("addition()", { #' it("can add two numbers", { #' expect_equivalent(1 + 1, addition(1, 1)) #' }) #' }) #' describe("division()", { #' it("can divide two numbers", { #' expect_equivalent(10 / 2, division(10, 2)) #' }) #' it("can handle division by 0") #not yet implemented #' }) #' }) describe <- function(description, code) { is_invalid_description <- function (description) { (!is.character(description) || length(description) != 1 || nchar(description) == 0) } if (is_invalid_description(description)) { stop("description must be a string of at least length 1") } # prepares a new environment for each it-block describe_environment <- new.env(parent = parent.frame()) describe_environment$it <- function(it_description, it_code = NULL) { if (is_invalid_description(it_description)) { stop("it-description must be a string of at least length 1") } if (missing(it_code)) return() test_description <- paste0(description, ": ", it_description) test_code(test_description, substitute(it_code), env = describe_environment) } eval(substitute(code), describe_environment) invisible() } testthat/R/reporter-teamcity.r0000644000175100001440000000416012453255332016200 0ustar hornikusers#' @include reporter.r NULL #' Test reporter: Teamcity format. #' #' This reporter will output results in the Teamcity message format. #' For more information about Teamcity messages, see #' http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity #' #' @export #' @export TeamcityReporter #' @aliases TeamcityReporter #' @keywords debugging #' @param ... Arguments used to initialise class TeamcityReporter <- setRefClass("TeamcityReporter", contains = "Reporter", fields = list( "currentContext" = "character", "currentTest" = "character" ), methods = list( start_context = function(desc) { currentContext <<- desc teamcity("testSuiteStarted", currentContext) }, end_context = function() { teamcity("testSuiteFinished", currentContext) cat("\n\n") }, start_test = function(desc) { currentTest <<- desc teamcity("testSuiteStarted", currentTest) }, end_test = function() { teamcity("testSuiteFinished", currentTest) cat("\n") }, start_reporter = function() { currentContext <<- "" }, add_result = function(result) { callSuper(result) testName <- strsplit(result$success_msg, "\n")[[1]][1] if (result$skipped) { teamcity("testIgnored", testName, message = result$failure_msg) return() } teamcity("testStarted", testName) if (!result$passed) { lines <- strsplit(result$failure_msg, "\n")[[1]] teamcity("testFailed", testName, message = lines[1], details = paste(lines[-1], collapse = "\n") ) } teamcity("testFinished", testName) } ) ) teamcity <- function(event, name, ...) { values <- list(name = name, ...) values <- vapply(values, teamcity_escape, character(1)) if (length(values) == 0) { value_string <- "" } else { value_string <- paste0(names(values), "='", values, "'", collapse = " ") } cat("##teamcity[", event, " ", value_string, "]\n", sep = "") } # teamcity escape character is | teamcity_escape <- function(s) { s <- gsub("(['|]|\\[|\\])", "|\\1", s) gsub("\n", "|n", s) } testthat/R/context.r0000644000175100001440000000134612154346413014207 0ustar hornikusers#' Describe the context of a set of tests. #' #' A context defines a set of tests that test related functionality. Usually #' you will have one context per file, but you may have multiple contexts #' in a single file if you so choose. #' #' @param desc description of context. Should start with a capital letter. #' @export #' @examples #' context("String processing") #' context("Remote procedure calls") context <- function(desc) { rep <- get_reporter() if (rep$context_open) { rep$end_context() } else { rep$context_open <- TRUE } rep$start_context(desc) } end_context <- function() { rep <- get_reporter() if (!rep$context_open) return(invisible()) rep$end_context() rep$context_open <- FALSE invisible() } testthat/R/watcher.r0000644000175100001440000000612212602555352014157 0ustar hornikusers#' Watch a directory for changes (additions, deletions & modifications). #' #' This is used to power the \code{\link{auto_test}} and #' \code{\link{auto_test_package}} functions which are used to rerun tests #' whenever source code changes. #' #' Use Ctrl + break (windows), Esc (mac gui) or Ctrl + C (command line) to #' stop the watcher. #' #' @param path character vector of paths to watch. Omit trailing backslash. #' @param pattern file pattern passed to \code{\link{dir}} #' @param callback function called everytime a change occurs. It should #' have three parameters: added, deleted, modified, and should return #' TRUE to keep watching, or FALSE to stop. #' @param hash hashes are more accurate at detecting changes, but are slower #' for large files. When FALSE, uses modification time stamps #' @export watch <- function(path, callback, pattern = NULL, hash = TRUE) { prev <- dir_state(path, pattern, hash = hash) repeat { Sys.sleep(1) curr <- dir_state(path, pattern, hash = hash) changes <- compare_state(prev, curr) if (changes$n > 0) { # cat("C") keep_going <- TRUE try(keep_going <- callback(changes$added, changes$deleted, changes$modified)) if (!isTRUE(keep_going)) return(invisible()) } else { # cat(".") } prev <- curr } } #' Compute a digest of a filename, returning NA if the file doesn't #' exist. #' #' @param filename filename to compute digest on #' @return a digest of the file, or NA if it doesn't exist. #' @keywords internal safe_digest <- function(path) { if (!file.exists(path)) return(NA_character_) if (is_directory(path)) return(NA_character_) if (!is_readable(path)) return(NA_character_) digest::digest(path, file = TRUE) } #' Capture the state of a directory. #' #' @param path path to directory #' @param pattern regular expression with which to filter files #' @param hash use hash (slow but accurate) or time stamp (fast but less #' accurate) #' @keywords internal dir_state <- function(path, pattern = NULL, hash = TRUE) { files <- dir(path, pattern, full.names = TRUE) # It's possible for any of the files to be deleted between the dir() # call above and the calls below; `file.info` handles this # gracefully, but digest::digest doesn't -- so we wrap it. Both # cases will return NA for files that have gone missing. if (hash) { file_states <- vapply(files, safe_digest, character(1)) } else { file_states <- stats::setNames(file.info(files)$mtime, files) } file_states[!is.na(file_states)] } #' Compare two directory states. #' #' @param old previous state #' @param new current state #' @return list containing number of changes and files which have been #' \code{added}, \code{deleted} and \code{modified} #' @keywords internal compare_state <- function(old, new) { added <- setdiff(names(new), names(old)) deleted <- setdiff(names(old), names(new)) same <- intersect(names(old), names(new)) modified <- names(new[same])[new[same] != old[same]] n <- length(added) + length(deleted) + length(modified) list(n = n, added = added, deleted = deleted, modified = modified) } testthat/R/reporter-zzz.r0000644000175100001440000000313412602543713015215 0ustar hornikusers#' @include reporter-stop.r NULL #' Get/set reporter; execute code in specified reporter. #' #' Changes global reporter to that specified, runs code and the returns #' global reporter back to previous value. #' #' @keywords internal #' @param reporter test reporter to use #' @param code code block to execute #' @name reporter-accessors NULL testthat_env <- new.env() # Default has to be the stop reporter, since it is this that will be run by # default from the command line and in R CMD test. testthat_env$reporter <- StopReporter$new() #' @rdname reporter-accessors #' @export set_reporter <- function(reporter) { old <- testthat_env$reporter testthat_env$reporter <- reporter old } #' @rdname reporter-accessors #' @export get_reporter <- function() { testthat_env$reporter } #' @rdname reporter-accessors #' @export with_reporter <- function(reporter, code) { reporter <- find_reporter(reporter) old <- set_reporter(reporter) on.exit(set_reporter(old)) reporter$start_reporter() force(code) reporter$end_reporter() invisible(reporter) } #' Find reporter object given name or object. #' #' If not found, will return informative error message. #' Will return null if given NULL. #' #' @param reporter name of reporter #' @keywords internal find_reporter <- function(reporter) { if (is.null(reporter)) return(NULL) if (inherits(reporter, "Reporter")) return(reporter) name <- reporter substr(name, 1, 1) <- toupper(substr(name, 1, 1)) name <- paste0(name, "Reporter") if (!exists(name)) { stop("Can not find test reporter ", reporter, call. = FALSE) } get(name)$new() } testthat/R/compare.r0000644000175100001440000001245712602555163014160 0ustar hornikusers#' Provide human-readable comparison of two objects #' #' \code{compare} is similar to \code{\link[base]{all.equal}()}, but shows #' you examples of where the failures occured. #' #' @export #' @param x,y Objects to compare #' @param ... Additional arguments used to control specifics of comparison compare <- function(x, y, ...) { UseMethod("compare", x) } comparison <- function(equal = TRUE, message = "Equal") { stopifnot(is.logical(equal), length(equal) == 1) stopifnot(is.character(message), length(message) == 1) structure( list( equal = equal, message = message ), class = "comparison" ) } #' @export print.comparison <- function(x, ...) { if (x$equal) { cat("Equal\n") return() } cat(x$message) } #' @export #' @rdname compare compare.default <- function(x, y, ...){ same <- all.equal(x, y, ...) comparison(identical(same, TRUE), paste0(same, collapse = "\n")) } #' @param max_diffs Maximum number of differences to show #' @param max_lines Maximum number of lines to show from each difference #' @param width Width of output device #' @rdname compare #' @export #' @examples #' # Character ----------------------------------------------------------------- #' x <- c("abc", "def", "jih") #' compare(x, x) #' #' y <- paste0(x, "y") #' compare(x, y) #' #' compare(letters, paste0(letters, "-")) #' #' x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus #' tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar #' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " #' y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus #' tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar #' dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " #' compare(x, y) #' compare(c(x, x), c(y, y)) #' compare.character <- function(x, y, ..., max_diffs = 5, max_lines = 5, width = getOption("width")) { if (identical(x, y)) return(comparison()) # If they're not the same type or length, fallback to default method if (!same_type(x, y)) return(NextMethod()) lx <- length(x) ly <- length(y) if (lx != ly) { length(x) <- length(y) <- max(lx, ly) length_diff <- sprintf("Lengths (%s, %s) differ\n", lx, ly) } else { length_diff <- NULL } # If vectorwise-equal, fallback to default method diff <- xor(is.na(x), is.na(y)) | x != y diff[is.na(diff)] <- FALSE which_diff <- which(diff) if (length(which_diff) == 0L) { return(NextMethod()) } width <- width - 6 # allocate space for labels n_show <- seq_len(min(length(which_diff), max_diffs)) show <- which_diff[n_show] encode <- function(x) encodeString(x, quote = '"') show_x <- str_trunc(encode(x[show]), width * max_lines) show_y <- str_trunc(encode(y[show]), width * max_lines) sidebyside <- Map(function(x, y, pos) { x <- if (pos <= lx) paste0("x[", pos, "]: ", str_chunk(x, width)) y <- if (pos <= ly) paste0("y[", pos, "]: ", str_chunk(y, width)) paste(c(x, y), collapse = "\n") }, show_x, show_y, show) msg <- paste0(length_diff, sum(diff), " string mismatches:\n", paste0(sidebyside, collapse = "\n\n")) comparison(FALSE, msg) } str_trunc <- function(x, length) { too_long <- nchar(x) > length x[too_long] <- paste0(substr(x[too_long], 1, length - 3), "...") x } str_chunk <- function(x, length) { lines <- ceiling(nchar(x) / length) start <- (seq_len(lines) - 1) * length + 1 substring(x, start, start + length - 1) } #' @export #' @rdname compare #' @examples #' # Numeric ------------------------------------------------------------------- #' #' x <- y <- runif(100) #' y[sample(100, 10)] <- 5 #' compare(x, y) #' #' x <- y <- 1:10 #' x[5] <- NA #' x[6] <- 6.5 #' compare(x, y) #' #' # Compare ignores minor numeric differences in the same way #' # as all.equal. #' compare(x, x + 1e-9) compare.numeric <- function(x, y, ..., max_diffs = 10) { equal <- all.equal(x, y, ...) if (isTRUE(equal)) return(comparison()) # If they're not the same type or length, fallback to default method equal <- paste0(equal, collapse = "\n") if (!is.integer(x) && !is.numeric(y)) return(comparison(FALSE, equal)) if (length(x) != length(y)) return(comparison(FALSE, equal)) if (length(x) == 1) { msg <- paste0(format(x, digits = 3), " - ", format(y, digits = 3), " == ", format(x - y, digits = 3)) return(comparison(FALSE, msg)) } # If vectorwise-equal, fallback to default method diff <- xor(is.na(x), is.na(y)) | x != y diff[is.na(diff)] <- FALSE if (!any(diff)) { return(NextMethod()) } mismatch <- data.frame(pos = which(diff), x = x[diff], y = y[diff]) mismatch$diff <- mismatch$x - mismatch$y n <- min(max_diffs, nrow(mismatch)) mu <- mean(abs(x[diff] - y[diff]), na.rm = TRUE) msg <- paste0( sum(diff), "/", length(diff), " mismatches ", "(average diff: ", format(mu, digits = 3), ").\n", "First ", n, ":\n", print_out(mismatch[1:n, , drop = FALSE], row.names = FALSE, digits = 3) ) comparison(FALSE, msg) } print_out <- function(x, ...) { lines <- utils::capture.output(print(x, ...)) paste0(lines, collapse = "\n") } same_type <- function(x, y) { if (typeof(x) != typeof(y)) return(FALSE) if (!identical(class(x), class(y))) return(FALSE) TRUE } testthat/R/praise.R0000644000175100001440000000107412602554460013745 0ustar hornikuserspraise <- function() { x <- c( "You rock!", "You are a coding rockstar!", "Keep up the good work.", ":)", "Woot!", "Way to go!", "Nice code.", praise::praise("Your tests are ${adjective}!"), praise::praise("${EXCLAMATION} - ${adjective} code.") ) sample(x, 1) } encourage <- function() { x <- c( "Keep trying!", "Don't worry, you'll get it.", "No-one is perfect!", "No-one gets it right on their first try", "Frustration is a natural part of programming :)", "I believe in you!" ) sample(x, 1) } testthat/README.md0000644000175100001440000000462712607267313013427 0ustar hornikusers# testthat [![Travis-CI Build Status](https://travis-ci.org/hadley/testthat.svg?branch=master)](https://travis-ci.org/hadley/testthat) [![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/github/hadley/testthat?branch=master&svg=true)](https://ci.appveyor.com/project/hadley/testthat) [![Coverage Status](https://img.shields.io/codecov/c/github/hadley/testthat/master.svg)](https://codecov.io/github/hadley/testthat?branch=master) [![CRAN RStudio mirror downloads](http://cranlogs.r-pkg.org/badges/testthat)](http://cran.r-project.org/web/packages/testthat/index.html) [![CRAN version](http://www.r-pkg.org/badges/version/testthat)](http://cran.r-project.org/package=testthat) Testing your code is normally painful and boring. `testthat` tries to make testing as fun as possible, so that you get a visceral satisfaction from writing tests. Testing should be fun, not a drag, so you do it all the time. To make that happen, `testthat`: * Provides functions that make it easy to describe what you expect a function to do, including catching errors, warnings and messages. * Easily integrates in your existing workflow, whether it's informal testing on the command line, building test suites or using R CMD check. * Can re-run tests automatically as you change your code or tests. * Displays test progress visually, showing a pass, fail or error for every expectation. If you're using the terminal, it'll even colour the output. `testthat` draws inspiration from the xUnit family of testing packages, as well from many of the innovative ruby testing libraries, like [rspec](http://rspec.info/), [testy](https://github.com/ahoward/testy), [bacon](https://github.com/chneukirchen/bacon) and [cucumber](https://cucumber.io). I have used what I think works for R, and abandoned what doesn't, creating a testing environment that is philosophically centred in R. Instructions for using this package can be found in the [Testing](http://r-pkgs.had.co.nz/tests.html) chapter of [R packages](http://r-pkgs.had.co.nz/). ## Integration with R CMD check If you're using testthat in a package, you should put your tests in `tests/testthat`. Each test file should start with `test` and end in `.R` or `.r`. To ensure `R CMD check` runs your tests, place the following code in `tests/testthat.R`: ```R library(testthat) library(yourpackage) test_check("yourpackage") ``` Also make sure to add `Suggests: testthat` to your `DESCRIPTION`. testthat/MD50000644000175100001440000001625412607405030012445 0ustar hornikusersd0273d3ae2a3e02a17be117d117ed6de *DESCRIPTION d75b45329d0d6111164c723033e807c3 *LICENSE 38480b07925f688a1a4ba90d805d3673 *NAMESPACE 3f46df23e5c36a3f5ca3a8cdaed76d4c *R/auto-test.r e8a503db5f011cc0b286b1dc1e41b171 *R/colour-text.r 5a1f764257503bb9c8af0929273909d6 *R/compare.r 5934f124309b429bb726c18b9dedf616 *R/context.r e4c35326b4edb0c51354782099dba55b *R/describe.r feaa4dfcb6be8eaf0e78846fdfc14cdc *R/evaluate-promise.r 34b299f58d23a0da11744296856eaea2 *R/expect-that.r 36d8c29d951f3ec40f579e7c082dc935 *R/expectation.r c2fab15dc0c00ba2141e88c7fb466f3b *R/expectations-equality.R 6008f8a6a21ad9239ef9fb4628242269 *R/expectations-matches.R 8b5bb4f46504d47dfbc89bcd4818e385 *R/expectations-old.R 604a64284db62c6c4c5ca580eab75d32 *R/expectations-silent.R 2b69a300dbc387249d54d78d686e75fa *R/expectations.r eedb0f03a465fb7e08ea26a6f9fadd39 *R/make-expectation.r 8ca4a980e2c07f366ba3d9c82c7c8aa3 *R/mock.r bcda6090e1407ac2455e8dbdf6c415ac *R/praise.R 2f77615c23aec8fb292f9e5df856b96c *R/reporter-check.R 49ed48ce2fa9dfc09d35bcd9d1517d39 *R/reporter-list.r dffebddfef3b774467a698ecb5738f8f *R/reporter-minimal.r 2dd4543a91451ac3f3c67e789f72dd81 *R/reporter-multi.r 61418564ca454f60cb9fa0ef171e15a5 *R/reporter-rstudio.R af5f37023a84fc2c90f2b7febf0b1236 *R/reporter-silent.r 96153f024a62fa65dcee4db86b9492f0 *R/reporter-stop.r fbb4e238da4765a3360e4b7625898bcc *R/reporter-summary.r eb10cd82f36f567151019c99d73417b7 *R/reporter-tap.r fc7c8dbf65e13744f858c73c2d60e38e *R/reporter-teamcity.r 5bab38dbb33c0642839db2e2091601b9 *R/reporter-zzz.r 02c71b8d775303187b79b1d43be52ee2 *R/reporter.r 596e8893dbf4924b9f611f35b686eb16 *R/test-example.R b4b120c686875e12fc667cb927490449 *R/test-files.r 258d03f42a93889279af4d80c2f9bdd2 *R/test-package.r fddec4716c0d5bf4025af2d8e0d1e4df *R/test-results.r 367f1bdb1d6a77a72aa326926ea9bdff *R/test-that.r 64ec084241aba8ea35bada373aac0bee *R/traceback.r 3634bccd48bdb6ace48def866defd6b7 *R/utils.r c0925bdf7756a879b1f0bea07e229f6a *R/watcher.r 1617b5bc1f74618589aeddb229d99caa *README.md 591f70a8f8cca448a1ec9f7cf4f4807e *inst/CITATION 6655786ee4958b5c847926fe8d3b15b0 *man/CheckReporter-class.Rd ecfee26cac9c0a69d996d2342d3e6940 *man/ListReporter-class.Rd 181f68e20cdb21e1ffe2efc268812d28 *man/MinimalReporter-class.Rd 6f919e6b1d9010aaf4135f2aab8e9ce9 *man/MultiReporter-class.Rd 5ad119b0bdb56c0480ab54bfe058926e *man/Reporter-class.Rd 9bbac0b9896832de36c209e4f803e155 *man/RstudioReporter-class.Rd 2209204179a3f93e39d4c5df7eb24647 *man/SilentReporter-class.Rd 3a5dbc235fb1a060404fd26fb8b326ac *man/StopReporter-class.Rd 05ba9d2eb5564da17fe1e4c2d2a01b26 *man/SummaryReporter-class.Rd 5c17559ada9187f23ff499f9779e70f2 *man/TapReporter-class.Rd 094295c4f65aa7a50233013fc50e9b35 *man/TeamcityReporter-class.Rd 55c75918eefb5f14022c05bb779bcc57 *man/auto_test.Rd 50664467b9569cd991018826f6f959cf *man/auto_test_package.Rd f679e480651b8c4677e6dd6aab8b65c6 *man/compare.Rd 8c2237f602f03240d1fffd5b884e0a86 *man/compare_state.Rd 7400366a787e24257b18d05853b74fc9 *man/context.Rd 7c7165941fd8fe4c43d78e48aaa28a22 *man/describe.Rd 07f396a22d4493adbff86c8f92d56b50 *man/dir_state.Rd ec1fe5f80cc8c51d0cf866a2df990970 *man/equivalence.Rd 88abd9e74cf82137cbabadfa6e5567a5 *man/evaluate_promise.Rd 629c1253756f75e54c1841fe7ee241a7 *man/expect-compare.Rd 98b6a6834650bc015bd6c588dde84c90 *man/expect_equal_to_reference.Rd 20f13bc365ec2f88808e76adbf05ae78 *man/expect_is.Rd 7a8845862d090b1f3f8f48e8fc8bdd60 *man/expect_named.Rd d92c9ff5934ec22350e5eade6be4b87d *man/expect_null.Rd 9259e0d0289168cb1c6f51eb2b38ed97 *man/expect_silent.Rd cc8a6a933619dea078b835320cf128d7 *man/expect_that.Rd 99086721241fd0fe9719e80c127ed6a4 *man/expect_true.Rd 474247c6c0d6c2c8558487f9b5ec515e *man/expectation.Rd 5bbc37ad5b3977fdc952cedd2bae145b *man/fail.Rd 57ffdc42ce21aecea91741eab3ed5eec *man/find_reporter.Rd 21d33357efcee14fee5a89593debe571 *man/find_test_scripts.Rd 3b6f5e7c9cac1c573944fa6baf801ae0 *man/make_expectation.Rd df7a706b47a44ce33df1caf49a604dc1 *man/matching-expectations.Rd 9e883c6c9199f904510a10631bf25847 *man/not.Rd 33ad04a9f90ad2cf1a08f35b0bc7f1b3 *man/oldskool.Rd d63f842e7c2107ce6d7153ae49d61da6 *man/reporter-accessors.Rd d4a4eed58f8c35b96f6e400e25ca20e9 *man/safe_digest.Rd 42270ac590545d2ab3542ff486af1a3f *man/setup_test_dir.Rd a6d6dc4958651c6e6a095e58f0b131f9 *man/skip.Rd 2d96b458d4512f6feac1b950f079770c *man/source_dir.Rd 9da3060a12bfb6bcdfd6c5c79b539f1e *man/source_test_helpers.Rd 191f6325c719ced57a9f347e9f14e139 *man/succeed.Rd 6b16906e97950ccd62b81a17031e7b85 *man/takes_less_than.Rd 9616ba73281add7804d0cbd4f5530ab3 *man/test_dir.Rd 4e42c5d2fd3013be32e77ab7fba69fef *man/test_env.Rd 40051abd48851ced04b5347e2bcd0fa4 *man/test_examples.Rd ede441c263e8cf719699827c78145293 *man/test_file.Rd 66373ff7b7ea17f13334204cbc137dd7 *man/test_package.Rd de797b6f31f4336b63a0d2e636b466b2 *man/test_that.Rd 4474feb77de86810bc0a49098831ca21 *man/testthat.Rd 05f991b5b08437ff5a9ebac240a2c2d8 *man/testthat_results.Rd a5cabd279b376ffd295cd74e154b2fb8 *man/watch.Rd e2112c759ba0b11a9a34f4060b098273 *man/with_mock.Rd 8d71e14ae7b661a36b31c729f0b5a994 *src/reassign.c 3e17960f6ee391331b861ecf5a4c1d2c *tests/testthat.R dca111da574904db7ea9c6f8d205a1c4 *tests/testthat/context.r 7340014f5a98c1db4a52789a689ee123 *tests/testthat/helper-assign.R cfde242811815c10019817cd919d7719 *tests/testthat/one.rds 965fc56da5333012ae5068c826308af5 *tests/testthat/test-bare.r 662c140eb5bd6c4bc887745e0a365aa1 *tests/testthat/test-basics.r cc8b3e4cf85935bb89833c6f2c4074f1 *tests/testthat/test-colour.r cef4f59008fd2b52bfc6fd18f9e7d616 *tests/testthat/test-compare.r 6c84e8f58a3a58830f3a83ca50cc3e27 *tests/testthat/test-context.r b78b84a2c2bf9dbe07621e034e233281 *tests/testthat/test-describe.r 5cd8a93f777520ffe9e1049dc53a325b *tests/testthat/test-environment.R 3e376dd6f6f29ee33b1b364e9e2e33b3 *tests/testthat/test-expect_that.r 1131c0565a6f86a6442b29565c008add *tests/testthat/test-expectations.r 66c94bdc96839811336b9cdbc46b0c65 *tests/testthat/test-helpers.R 6656bac0d6c4e3b6f46e3a73065a04c6 *tests/testthat/test-line_numbers.r db2652b8128125a1be1bd26c7b907d84 *tests/testthat/test-mock.r 39e8e9c6d0e094f8510e9468fb1b8cf4 *tests/testthat/test-negations.r 1d7745fe6289af4409a98ec24d5b50a5 *tests/testthat/test-reporter-list.r 779649025de9cac96c7f3b71c76f8f56 *tests/testthat/test-reporter-multi.r 1c2ae58d964ad9e668eb692d681e252b *tests/testthat/test-reporter-tap.r 9b732d80a700a45893d9a2f55fdf55e5 *tests/testthat/test-reporter.r 75dc899785858752a4c5bda459ceace9 *tests/testthat/test-silence.R f876deae8a0120c1564f18d367019dbc *tests/testthat/test-source_dir.r ad6e6cb3a1a8817f85adcb6de38b199b *tests/testthat/test-test_dir.r 3d0d2c4f1595fa72cdd4d7e518561f4e *tests/testthat/test-watcher.r 14084a2d542131dd9053b354196b1d99 *tests/testthat/test-xxx.r 2c0b0d7a13ce46b068d8b29383f0c9d9 *tests/testthat/test_dir/helper_hello.r a96923901555694e457d4ed22b47a897 *tests/testthat/test_dir/test-bare-expectations.r df5e168d7d94b398312e588a77818ede *tests/testthat/test_dir/test-basic.r b6a4549ac522212cba9f0323b38b4312 *tests/testthat/test_dir/test-empty.r 50b32ea9869ea59c98e488dd70556207 *tests/testthat/test_dir/test-errors.r d4614c8b6b935d7ff1d8b07a1860876c *tests/testthat/test_dir/test-failures.r 8a9306859e8314cd90d44747e1cc4e04 *tests/testthat/test_dir/test-helper.r 4545cb72f7310794c70b6a88f0d91b73 *tests/testthat/test_dir/test-skip.r testthat/DESCRIPTION0000644000175100001440000000260512607405030013636 0ustar hornikusersPackage: testthat Version: 0.11.0 Title: Unit Testing for R Description: A unit testing system designed to be fun, flexible and easy to set up. Authors@R: c( person("Hadley", "Wickham", , "hadley@rstudio.com", c("aut", "cre")), person("RStudio", role = "cph") ) URL: https://github.com/hadley/testthat BugReports: https://github.com/hadley/testthat/issues Depends: R (>= 3.1.0), methods Imports: digest, crayon, praise Suggests: devtools License: MIT + file LICENSE Collate: 'auto-test.r' 'colour-text.r' 'compare.r' 'context.r' 'describe.r' 'evaluate-promise.r' 'expect-that.r' 'expectation.r' 'expectations-equality.R' 'expectations-matches.R' 'expectations-old.R' 'expectations-silent.R' 'expectations.r' 'make-expectation.r' 'mock.r' 'praise.R' 'reporter.r' 'reporter-check.R' 'reporter-list.r' 'reporter-minimal.r' 'reporter-multi.r' 'reporter-rstudio.R' 'reporter-silent.r' 'reporter-stop.r' 'reporter-summary.r' 'reporter-tap.r' 'reporter-teamcity.r' 'reporter-zzz.r' 'test-example.R' 'test-files.r' 'test-package.r' 'test-results.r' 'test-that.r' 'traceback.r' 'utils.r' 'watcher.r' NeedsCompilation: yes Packaged: 2015-10-13 20:53:56 UTC; hadley Author: Hadley Wickham [aut, cre], RStudio [cph] Maintainer: Hadley Wickham Repository: CRAN Date/Publication: 2015-10-14 09:54:00 testthat/man/0000755000175100001440000000000012606743704012714 5ustar hornikuserstestthat/man/RstudioReporter-class.Rd0000644000175100001440000000065512606743734017473 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-rstudio.R \docType{class} \name{RstudioReporter-class} \alias{RstudioReporter} \alias{RstudioReporter-class} \title{Test reporter: RStudio} \arguments{ \item{...}{Arguments used to initialise class} } \description{ This reporter is designed for output to RStudio. It produces results in any easily parsed form. } \keyword{debugging} testthat/man/test_file.Rd0000644000175100001440000000107012606743734015162 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-files.r \name{test_file} \alias{test_file} \title{Run all tests in specified file.} \usage{ test_file(path, reporter = "summary", env = test_env(), start_end_reporter = TRUE) } \arguments{ \item{path}{path to file} \item{reporter}{reporter to use} \item{env}{environment in which to execute the tests} \item{start_end_reporter}{whether to start and end the reporter} } \value{ the results as a "testthat_results" (list) } \description{ Run all tests in specified file. } testthat/man/describe.Rd0000644000175100001440000000403012606743734014763 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/describe.r \name{describe} \alias{describe} \title{describe: a BDD testing language} \usage{ describe(description, code) } \arguments{ \item{description}{description of the feature} \item{code}{test code containing the specs} } \description{ A simple BDD DSL for writing tests. The language is similiar to RSpec for Ruby or Mocha for JavaScript. BDD tests read like sentences and it should thus be easier to understand what the specification of a function/component is. } \details{ Tests using the \code{describe} syntax not only verify the tested code, but also document its intended behaviour. Each \code{describe} block specifies a larger component or function and contains a set of specifications. A specification is definied by an \code{it} block. Each \code{it} block functions as a test and is evaluated in its own environment. You can also have nested \code{describe} blocks. This test syntax helps to test the intented behaviour of your code. For example: you want to write a new function for your package. Try to describe the specification first using \code{describe}, before your write any code. After that, you start to implement the tests for each specification (i.e. the \code{it} block). Use \code{describe} to verify that you implement the right things and use \code{\link{test_that}} to ensure you do the things right. } \examples{ describe("matrix()", { it("can be multiplied by a scalar", { m1 <- matrix(1:4, 2, 2) m2 <- m1 * 2 expect_equivalent(matrix(1:4 * 2, 2, 2), m2) }) it("can have not yet tested specs") }) # Nested specs: ## code addition <- function(a, b) a + b division <- function(a, b) a / b ## specs describe("math library", { describe("addition()", { it("can add two numbers", { expect_equivalent(1 + 1, addition(1, 1)) }) }) describe("division()", { it("can divide two numbers", { expect_equivalent(10 / 2, division(10, 2)) }) it("can handle division by 0") #not yet implemented }) }) } testthat/man/test_package.Rd0000644000175100001440000000240512606743734015641 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-package.r \name{test_package} \alias{test_check} \alias{test_package} \title{Run all tests in an installed package.} \usage{ test_package(package, filter = NULL, reporter = "summary", ...) test_check(package, filter = NULL, reporter = "check", ...) } \arguments{ \item{package}{package name} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching will take on the file name after it has been stripped of \code{"test-"} and \code{".r"}.} \item{reporter}{reporter to use} \item{...}{Additional arguments passed to \code{grepl} to control filtering.} } \value{ the results as a "testthat_results" (list) } \description{ Test are run in an environment that inherits from the package's namespace environment, so that tests can access non-exported functions and variables. Tests should be placed in \code{tests/testthat}. Use \code{test_check} with \code{R CMD check} and \code{test_pacakge} interactively at the console. } \section{R CMD check}{ Create \code{tests/testthat.R} that contains: \preformatted{ library(testthat) library(yourpackage) test_check("yourpackage") } } \examples{ \dontrun{test_package("testthat")} } testthat/man/fail.Rd0000644000175100001440000000070312606743734014121 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expect-that.r \name{fail} \alias{fail} \title{A default expectation that always fails.} \usage{ fail(message = "Failure has been forced") } \arguments{ \item{message}{a string to display.} } \description{ The fail function forces a test to fail. This is useful if you want to test a pre-condition ' } \examples{ \dontrun{ test_that("this test fails", fail()) } } testthat/man/expect_is.Rd0000644000175100001440000000312312606743734015170 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations.r \name{expect_is} \alias{expect_is} \title{Expectation: does the object inherit from a class?} \usage{ expect_is(object, class, info = NULL, label = NULL) } \arguments{ \item{object}{object to test} \item{class}{character vector of class names} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{object label. When \code{NULL}, computed from deparsed object.} } \description{ Tests whether or not an object inherits from any of a list of classes. } \examples{ expect_is(1, "numeric") a <- matrix(1:10, nrow = 5) expect_is(a, "matrix") expect_is(mtcars, "data.frame") # alternatively for classes that have an is method expect_true(is.data.frame(mtcars)) } \seealso{ \code{\link{inherits}} Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/find_reporter.Rd0000644000175100001440000000061612606743734016053 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-zzz.r \name{find_reporter} \alias{find_reporter} \title{Find reporter object given name or object.} \usage{ find_reporter(reporter) } \arguments{ \item{reporter}{name of reporter} } \description{ If not found, will return informative error message. Will return null if given NULL. } \keyword{internal} testthat/man/compare_state.Rd0000644000175100001440000000072212606743734016035 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/watcher.r \name{compare_state} \alias{compare_state} \title{Compare two directory states.} \usage{ compare_state(old, new) } \arguments{ \item{old}{previous state} \item{new}{current state} } \value{ list containing number of changes and files which have been \code{added}, \code{deleted} and \code{modified} } \description{ Compare two directory states. } \keyword{internal} testthat/man/SilentReporter-class.Rd0000644000175100001440000000077012606743734017276 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-silent.r \docType{class} \name{SilentReporter-class} \alias{SilentReporter} \alias{SilentReporter-class} \title{Test reporter: gather all errors silently.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ This reporter quietly runs all tests, simply gathering the results for later use. This is helpful for programmatically inspecting errors after a test run. } \keyword{debugging} testthat/man/make_expectation.Rd0000644000175100001440000000117312606743734016530 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/make-expectation.r \name{make_expectation} \alias{make_expectation} \title{Make an equality test.} \usage{ make_expectation(x, expectation = "equals") } \arguments{ \item{x}{a vector of values} \item{expectation}{the type of equality you want to test for (\code{equals}, \code{is_equivalent_to}, \code{is_identical_to})} } \description{ This a convenience function to make a expectation that checks that input stays the same. } \examples{ x <- 1:10 make_expectation(x) make_expectation(mtcars$mpg) df <- data.frame(x = 2) make_expectation(df) } testthat/man/test_examples.Rd0000644000175100001440000000120312606743734016057 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-example.R \name{test_examples} \alias{test_example} \alias{test_examples} \title{Test package examples} \usage{ test_examples(path = "../../man") test_example(path) } \arguments{ \item{path}{For \code{test_examples}, path to directory containing Rd files. For \code{test_example}, path to a single Rd file. Remember the working directory for tests is \code{tests/testthat}.} } \description{ These helper functions make it easier to test the examples in a package. Each example counts as one test, and it succeeds if the code runs without an error. } testthat/man/expect_that.Rd0000644000175100001440000000166612606743734015527 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expect-that.r \name{expect_that} \alias{expect_that} \title{Expect that a condition holds.} \usage{ expect_that(object, condition, info = NULL, label = NULL) } \arguments{ \item{object}{object to test} \item{condition,}{a function that returns whether or not the condition is met, and if not, an error message to display.} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{object label. When \code{NULL}, computed from deparsed object.} } \value{ the (internal) expectation result as an invisible list } \description{ An old style of testing that's no longer encouraged. } \examples{ expect_that(5 * 2, equals(10)) expect_that(sqrt(2) ^ 2, equals(2)) \dontrun{ expect_that(sqrt(2) ^ 2, is_identical_to(2)) } } \seealso{ \code{\link{fail}} for an expectation that always fails. } \keyword{internal} testthat/man/TapReporter-class.Rd0000644000175100001440000000101512606743734016555 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-tap.r \docType{class} \name{TapReporter-class} \alias{TapReporter} \alias{TapReporter-class} \title{Test reporter: TAP format.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ This reporter will output results in the Test Anything Protocol (TAP), a simple text-based interface between testing modules in a test harness. For more information about TAP, see http://testanything.org } \keyword{debugging} testthat/man/expect-compare.Rd0000644000175100001440000000401312606743734016120 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations.r \name{expect-compare} \alias{expect-compare} \alias{expect_gt} \alias{expect_gte} \alias{expect_less_than} \alias{expect_lt} \alias{expect_lte} \alias{expect_more_than} \title{Expectation: is returned value less or greater than specified value?} \usage{ expect_less_than(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) expect_lt(object, expected) expect_lte(object, expected) expect_more_than(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) expect_gt(object, expected) expect_gte(object, expected) } \arguments{ \item{object}{object to test} \item{expected}{Expected value} \item{...}{other values passed to \code{\link{all.equal}}} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{For full form, label of expected object used in error messages. Useful to override default (deparsed expected expression) when doing tests in a loop. For short cut form, object label. When \code{NULL}, computed from deparsed object.} \item{expected.label}{Equivalent of \code{label} for shortcut form.} } \description{ This is useful for ensuring returned value is below a ceiling or above a floor. } \examples{ a <- 9 expect_less_than(a, 10) \dontrun{ expect_less_than(11, 10) } a <- 11 expect_more_than(a, 10) \dontrun{ expect_more_than(9, 10) } } \seealso{ Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/equivalence.Rd0000644000175100001440000000534012606743734015511 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations-equality.R \name{equivalence} \alias{equivalence} \alias{expect_equal} \alias{expect_equivalent} \alias{expect_identical} \title{Expectation: is the object equal to a value?} \usage{ expect_equal(object, expected, ..., info = NULL, label = NULL, expected.label = NULL) expect_equivalent(object, expected, info = NULL, label = NULL, expected.label = NULL) expect_identical(object, expected, info = NULL, label = NULL, expected.label = NULL) } \arguments{ \item{object}{object to test} \item{expected}{Expected value} \item{...}{other values passed to \code{\link{all.equal}}} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{For full form, label of expected object used in error messages. Useful to override default (deparsed expected expression) when doing tests in a loop. For short cut form, object label. When \code{NULL}, computed from deparsed object.} \item{expected.label}{Equivalent of \code{label} for shortcut form.} } \description{ \itemize{ \item \code{expect_identical} tests with \code{\link{identical}} \item \code{expect_equal} tests with \code{\link{all.equal}} \item \code{expect_equivalent} tests with \code{\link{all.equal}} and \code{check.attributes = FALSE} } } \examples{ a <- 10 expect_equal(a, 10) # Use equals() when testing for numeric equality sqrt(2) ^ 2 - 1 expect_equal(sqrt(2) ^ 2, 2) # Neither of these forms take floating point representation errors into # account \dontrun{ expect_true(sqrt(2) ^ 2 == 2) expect_identical(sqrt(2) ^ 2, 2) } # You can pass on additional arguments to all.equal: \dontrun{ # Test the ABSOLUTE difference is within .002 expect_equal(object = 10.01, expected = 10, tolerance = .002, scale = 1) # Test the RELATIVE difference is within .002 expectedValue <- 10 expect_equal(object = 10.01, expected = expectedValue, tolerance = 0.002, scale = expectedValue) } # expect_equivalent ignores attributes a <- b <- 1:3 names(b) <- letters[1:3] expect_equivalent(a, b) } \seealso{ Other expectations: \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/CheckReporter-class.Rd0000644000175100001440000000074412606743734017056 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-check.R \docType{class} \name{CheckReporter-class} \alias{CheckReporter} \alias{CheckReporter-class} \title{Check reporter: 13 line summary of problems} \arguments{ \item{...}{Arguments used to initialise class} } \description{ \code{R CMD check} displays only the last 13 lines of the result, so this report is design to ensure that you see something useful there. } \keyword{debugging} testthat/man/expectation.Rd0000644000175100001440000000140312606743734015527 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectation.r \name{expectation} \alias{expectation} \alias{is.expectation} \title{Expectation class.} \usage{ expectation(passed, failure_msg, success_msg = "unknown", srcref = NULL) is.expectation(x) } \arguments{ \item{passed}{a single logical value indicating whether the test passed (\code{TRUE}), failed (\code{FALSE}), or threw an error (\code{NA})} \item{failure_msg}{A text description of failure} \item{success_msg}{A text description of success} \item{srcref}{Source reference, if known} \item{x}{object to test for class membership} } \description{ Any expectation should return objects of this class - see the built in expectations for details. } \keyword{internal} testthat/man/test_env.Rd0000644000175100001440000000074212606743734015040 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-files.r \name{test_env} \alias{test_env} \title{Generate default testing environment.} \usage{ test_env() } \description{ We use a new environment which inherits from \code{\link{globalenv}}. In an ideal world, we'd avoid putting the global environment on the search path for tests, but it's not currently possible without losing the ability to load packages in tests. } \keyword{internal} testthat/man/test_that.Rd0000644000175100001440000000212012606743734015200 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-that.r \name{test_that} \alias{test_that} \title{Create a test.} \usage{ test_that(desc, code) } \arguments{ \item{desc}{test name. Names should be kept as brief as possible, as they are often used as line prefixes.} \item{code}{test code containing expectations} } \description{ A test encapsulates a series of expectations about small, self-contained set of functionality. Each test is contained in a \link{context} and contains multiple expectation generated by \code{\link{equivalence}}. } \details{ Tests are evaluated in their own environments, and should not affect global state. When run from the command line, tests return \code{NULL} if all expectations are met, otherwise it raises an error. } \examples{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1 / sqrt(2)) expect_equal(cos(pi / 4), 1 / sqrt(2)) expect_equal(tan(pi / 4), 1) }) # Failing test: \dontrun{ test_that("trigonometric functions match identities", { expect_equal(sin(pi / 4), 1) }) } } testthat/man/succeed.Rd0000644000175100001440000000063412606743734014624 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expect-that.r \name{succeed} \alias{succeed} \title{A default expectation that always succeeds.} \usage{ succeed(message = "Success has been forced") } \arguments{ \item{message}{a string to display.} } \description{ A default expectation that always succeeds. } \examples{ \dontrun{ test_that("this test fails", fail()) } } testthat/man/expect_true.Rd0000644000175100001440000000345512606743734015544 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations.r \name{expect_true} \alias{expect_false} \alias{expect_true} \title{Expectation: is the object true/false?} \usage{ expect_true(object, info = NULL, label = NULL) expect_false(object, info = NULL, label = NULL) } \arguments{ \item{object}{object to test} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{object label. When \code{NULL}, computed from deparsed object.} } \description{ These are fall-back expectations that you can use when none of the other more specific expectations apply. The disadvantage is that you may get a less informative error message. } \details{ Attributes are ignored. } \examples{ expect_true(2 == 2) # Failed expectations will throw an error \dontrun{ expect_true(2 != 2) } expect_true(!(2 != 2)) # or better: expect_false(2 != 2) a <- 1:3 expect_true(length(a) == 3) # but better to use more specific expectation, if available expect_equal(length(a), 3) } \seealso{ \code{\link{is_false}} for complement Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/auto_test_package.Rd0000644000175100001440000000102212606743734016663 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/auto-test.r \name{auto_test_package} \alias{auto_test_package} \title{Watches a package for changes, rerunning tests as appropriate.} \usage{ auto_test_package(pkg = ".", reporter = "summary") } \arguments{ \item{pkg}{path to package} \item{reporter}{test reporter to use} } \description{ Watches a package for changes, rerunning tests as appropriate. } \seealso{ \code{\link{auto_test}} for details on how method works } \keyword{debugging} testthat/man/takes_less_than.Rd0000644000175100001440000000222712606743734016360 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations.r \name{takes_less_than} \alias{takes_less_than} \title{Expectation: does expression take less than a fixed amount of time to run?} \usage{ takes_less_than(amount) } \arguments{ \item{amount}{maximum duration in seconds} } \description{ This is useful for performance regression testing. } \seealso{ Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{expect_silent}} } \keyword{internal} testthat/man/expect_null.Rd0000644000175100001440000000244112606743734015531 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations.r \name{expect_null} \alias{expect_null} \title{Expectation: is the object NULL?} \usage{ expect_null(object, info = NULL, label = NULL) } \arguments{ \item{object}{object to test} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{object label. When \code{NULL}, computed from deparsed object.} } \description{ Expectation: is the object NULL? } \examples{ expect_null(NULL) } \seealso{ Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/test_dir.Rd0000644000175100001440000000166012606743734015026 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-files.r \name{test_dir} \alias{test_dir} \title{Run all of the tests in a directory.} \usage{ test_dir(path, filter = NULL, reporter = "summary", env = test_env(), ...) } \arguments{ \item{path}{path to tests} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching will take on the file name after it has been stripped of \code{"test-"} and \code{".r"}.} \item{reporter}{reporter to use} \item{env}{environment in which to execute test suite.} \item{...}{Additional arguments passed to \code{grepl} to control filtering.} } \value{ the results as a "testthat_results" (list) } \description{ Test files start with \code{test} and are executed in alphabetical order (but they shouldn't have dependencies). Helper files start with \code{helper} and loaded before any tests are run. } testthat/man/setup_test_dir.Rd0000644000175100001440000000124512606743734016245 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-files.r \name{setup_test_dir} \alias{setup_test_dir} \title{Take care or finding the test files and sourcing the helpers.} \usage{ setup_test_dir(path, filter, env) } \arguments{ \item{path}{path to tests} \item{filter}{If not \code{NULL}, only tests with file names matching this regular expression will be executed. Matching will take on the file name after it has been stripped of \code{"test-"} and \code{".r"}.} \item{env}{environment in which to source the helpers} } \value{ the test file paths } \description{ Take care or finding the test files and sourcing the helpers. } testthat/man/with_mock.Rd0000644000175100001440000000326512606743734015200 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/mock.r \name{with_mock} \alias{with_mock} \title{Mock functions in a package.} \usage{ with_mock(..., .env = topenv()) } \arguments{ \item{...}{named parameters redefine mocked functions, unnamed parameters will be evaluated after mocking the functions} \item{.env}{the environment in which to patch the functions, defaults to the top-level environment. A character is interpreted as package name.} } \value{ The result of the last unnamed parameter } \description{ Executes code after temporarily substituting implementations of package functions. This is useful for testing code that relies on functions that are slow, have unintended side effects or access resources that may not be available when testing. } \details{ This works by using some C code to temporarily modify the mocked function \emph{in place}. On exit (regular or error), all functions are restored to their previous state. This is somewhat abusive of R's internals, and is still experimental, so use with care. Primitives (such as \code{\link[base]{interactive}}) cannot be mocked, but this can be worked around easily by defining a wrapper function with the same name. } \examples{ with_mock( all.equal = function(x, y, ...) TRUE, expect_equal(2 * 3, 4), .env = "base" ) with_mock( `base::identical` = function(x, y, ...) TRUE, `base::all.equal` = function(x, y, ...) TRUE, expect_equal(x <- 3 * 3, 6), expect_identical(x + 4, 9) ) throws_error()(expect_equal(3, 5)) throws_error()(expect_identical(3, 5)) } \references{ Suraj Gupta (2012): \href{http://obeautifulcode.com/R/How-R-Searches-And-Finds-Stuff}{How R Searches And Finds Stuff} } testthat/man/Reporter-class.Rd0000644000175100001440000000062712606743734016120 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter.r \docType{class} \name{Reporter-class} \alias{Reporter} \alias{Reporter-class} \title{Stub object for managing a reporter of tests.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ Do not clone directly from this object - children should implement all methods. } \keyword{internal.} testthat/man/StopReporter-class.Rd0000644000175100001440000000135612606743734016766 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-stop.r \docType{class} \name{StopReporter-class} \alias{StopReporter} \alias{StopReporter-class} \title{Test reporter: stop on error.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ The default reporter, executed when \code{expect_that} is run interactively. It responds by \link{stop}()ing on failures and doing nothing otherwise. This will ensure that a failing test will raise an error. } \details{ This should be used when doing a quick and dirty test, or during the final automated testing of R CMD check. Otherwise, use a reporter that runs all tests and gives you more context about the problem. } \keyword{debugging} testthat/man/evaluate_promise.Rd0000644000175100001440000000230112606743734016546 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/evaluate-promise.r \name{evaluate_promise} \alias{evaluate_promise} \title{Evaluate a promise, capturing all types of output.} \usage{ evaluate_promise(code, print = FALSE) } \arguments{ \item{code}{Code to evaluate. This should be an unevaluated expression.} \item{print}{If \code{TRUE} and the result of evaluating \code{code} is visible this will print the result, ensuring that the output of printing the object is included in the overall output} } \value{ A list containing \item{result}{The result of the function} \item{output}{A string containing all the output from the function} \item{warnings}{A character vector containing the text from each warning} \item{messages}{A character vector containing the text from each message} } \description{ This uses \code{\link[evaluate]{evaluate}} a promise, returning the result, test, messages and warnings that the code creates in a list. It is used to evaluate code for all test that tests, ensuring that (as much as possible) any spurious output is suppressed during the testing process. } \examples{ evaluate_promise({ print("1") message("2") warning("3") 4 }) } testthat/man/auto_test.Rd0000644000175100001440000000251112606743734015214 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/auto-test.r \name{auto_test} \alias{auto_test} \title{Watches code and tests for changes, rerunning tests as appropriate.} \usage{ auto_test(code_path, test_path, reporter = "summary", env = test_env()) } \arguments{ \item{code_path}{path to directory containing code} \item{test_path}{path to directory containing tests} \item{reporter}{test reporter to use} \item{env}{environment in which to execute test suite.} } \description{ The idea behind \code{auto_test} is that you just leave it running while you develop your code. Everytime you save a file it will be automatically tested and you can easily see if your changes have caused any test failures. } \details{ The current strategy for rerunning tests is as follows: \itemize{ \item if any code has changed, then those files are reloaded and all tests rerun \item otherwise, each new or modified test is run } In the future, \code{auto_test} might implement one of the following more intelligent alternatives: \itemize{ \item Use codetools to build up dependency tree and then rerun tests only when a dependency changes. \item Mimic ruby's autotest and rerun only failing tests until they pass, and then rerun all tests. } } \seealso{ \code{\link{auto_test_package}} } \keyword{debugging} testthat/man/MinimalReporter-class.Rd0000644000175100001440000000114112606743734017417 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-minimal.r \docType{class} \name{MinimalReporter-class} \alias{MinimalReporter} \alias{MinimalReporter-class} \title{Test reporter: minimal.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ The minimal test reporter provides the absolutely minimum amount of information: whether each expectation has succeeded, failed or experienced an error. If you want to find out what the failures and errors actually were, you'll need to run a more informative test reporter. } \keyword{debugging} testthat/man/compare.Rd0000644000175100001440000000353512606743734014642 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/compare.r \name{compare} \alias{compare} \alias{compare.character} \alias{compare.default} \alias{compare.numeric} \title{Provide human-readable comparison of two objects} \usage{ compare(x, y, ...) \method{compare}{default}(x, y, ...) \method{compare}{character}(x, y, ..., max_diffs = 5, max_lines = 5, width = getOption("width")) \method{compare}{numeric}(x, y, ..., max_diffs = 10) } \arguments{ \item{x,y}{Objects to compare} \item{...}{Additional arguments used to control specifics of comparison} \item{max_diffs}{Maximum number of differences to show} \item{max_lines}{Maximum number of lines to show from each difference} \item{width}{Width of output device} } \description{ \code{compare} is similar to \code{\link[base]{all.equal}()}, but shows you examples of where the failures occured. } \examples{ # Character ----------------------------------------------------------------- x <- c("abc", "def", "jih") compare(x, x) y <- paste0(x, "y") compare(x, y) compare(letters, paste0(letters, "-")) x <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus bibendum, facilisis nisi non, pulvinar dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " y <- "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Duis cursus tincidunt auctor. Vestibulum ac metus1 bibendum, facilisis nisi non, pulvinar dolor. Donec pretium iaculis nulla, ut interdum sapien ultricies a. " compare(x, y) compare(c(x, x), c(y, y)) # Numeric ------------------------------------------------------------------- x <- y <- runif(100) y[sample(100, 10)] <- 5 compare(x, y) x <- y <- 1:10 x[5] <- NA x[6] <- 6.5 compare(x, y) # Compare ignores minor numeric differences in the same way # as all.equal. compare(x, x + 1e-9) } testthat/man/expect_named.Rd0000644000175100001440000000414212606743734015643 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations.r \name{expect_named} \alias{expect_named} \title{Expectation: does object have names?} \usage{ expect_named(object, expected, ignore.order = FALSE, ignore.case = FALSE, info = NULL, label = NULL) } \arguments{ \item{object}{object to test} \item{expected}{Character vector of expected names. Leave missing to match any names. Use \code{NULL} to check for absence of names.} \item{ignore.order}{If \code{TRUE}, sorts names before comparing to ignore the effect of order.} \item{ignore.case}{If \code{TRUE}, lowercases all names to ignore the effect of case.} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{object label. When \code{NULL}, computed from deparsed object.} \item{...}{Other arguments passed onto \code{has_names}.} } \description{ You can either check for the presence of names (leaving \code{expected} blank), specific names (by suppling a vector of names), or absence of names (with \code{NULL}). } \examples{ x <- c(a = 1, b = 2, c = 3) expect_named(x) expect_named(x, c("a", "b", "c")) # Use options to control sensitivity expect_named(x, c("B", "C", "A"), ignore.order = TRUE, ignore.case = TRUE) # Can also check for the absence of names with NULL z <- 1:4 expect_named(z, NULL) } \seealso{ Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_null}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/ListReporter-class.Rd0000644000175100001440000000102612606743734016746 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-list.r \docType{class} \name{ListReporter-class} \alias{ListReporter} \alias{ListReporter-class} \title{List reporter: gather all test results along with elapsed time and file information.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ This reporter gathers all results, adding additional information such as test elapsed time, and test filename if available. Very useful for reporting. } \keyword{debugging} testthat/man/dir_state.Rd0000644000175100001440000000075012606743734015166 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/watcher.r \name{dir_state} \alias{dir_state} \title{Capture the state of a directory.} \usage{ dir_state(path, pattern = NULL, hash = TRUE) } \arguments{ \item{path}{path to directory} \item{pattern}{regular expression with which to filter files} \item{hash}{use hash (slow but accurate) or time stamp (fast but less accurate)} } \description{ Capture the state of a directory. } \keyword{internal} testthat/man/expect_equal_to_reference.Rd0000644000175100001440000000541412606743734020411 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations-equality.R \name{expect_equal_to_reference} \alias{expect_equal_to_reference} \title{Expectation: is the object equal to a reference value stored in a file?} \usage{ expect_equal_to_reference(object, file, ..., info = NULL, label = NULL, expected.label = NULL) } \arguments{ \item{object}{object to test} \item{file}{The file name used to store the object. Should have an "rds" extension.} \item{...}{other values passed to \code{\link{expect_equal}}} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{For the full form, a label for the expected object, which is used in error messages. Useful to override the default (which is based on the file name), when doing tests in a loop. For the short-cut form, the object label, which is computed from the deparsed object by default.} \item{expected.label}{Equivalent of \code{label} for shortcut form.} } \description{ This expectation is equivalent to \code{\link{expect_equal}}, except that the expected value is stored in an RDS file instead of being specified literally. This can be helpful when the value is necessarily complex. If the file does not exist then it will be created using the value of the specified object, and subsequent tests will check for consistency against that generated value. The test can be reset by deleting the RDS file. } \details{ It is important to initialize the reference RDS file within the source package, most likely in the \code{tests/testthat/} directory. Testing spawned by \code{devtools::test()}, for example, will accomplish this. But note that testing spawned by \code{R CMD check} and \code{devtools::check()} will NOT. In the latter cases, the package source is copied to an external location before tests are run. The resulting RDS file will not make its way back into the package source and will not be available for subsequent comparisons. } \examples{ \dontrun{ expect_equal_to_reference(1, "one.rds") } } \seealso{ Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/TeamcityReporter-class.Rd0000644000175100001440000000104212606743734017610 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-teamcity.r \docType{class} \name{TeamcityReporter-class} \alias{TeamcityReporter} \alias{TeamcityReporter-class} \title{Test reporter: Teamcity format.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ This reporter will output results in the Teamcity message format. For more information about Teamcity messages, see http://confluence.jetbrains.com/display/TCD7/Build+Script+Interaction+with+TeamCity } \keyword{debugging} testthat/man/SummaryReporter-class.Rd0000644000175100001440000000160112606743734017467 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-summary.r \docType{class} \name{SummaryReporter-class} \alias{SummaryReporter} \alias{SummaryReporter-class} \title{Test reporter: summary of errors.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ This is the most useful reporting reporter as it lets you know both which tests have run successfully, as well as fully reporting information about failures and errors. It is the default reporting reporter used by \code{\link{test_dir}} and \code{\link{test_file}}. } \details{ You can use the \code{max_reports} field to control the maximum number of detailed reports produced by this reporter. This is useful when running with \code{\link{auto_test}} As an additional benefit, this reporter will praise you from time-to-time if all your tests pass. } \keyword{debugging} testthat/man/expect_silent.Rd0000644000175100001440000000237112606743734016057 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations-silent.R \name{expect_silent} \alias{expect_silent} \title{Expect that code has no output, messages, or warnings.} \usage{ expect_silent(expr) } \arguments{ \item{expr}{Expression to evaluate} } \description{ Expect that code has no output, messages, or warnings. } \examples{ expect_silent("123") f <- function() { message("Hi!") warning("Hey!!") print("OY!!!") } \dontrun{ expect_silent(f()) } } \seealso{ Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_error}}, \code{\link{expect_match}}, \code{\link{expect_message}}, \code{\link{expect_output}}, \code{\link{expect_warning}}, \code{\link{matching-expectations}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{takes_less_than}} } testthat/man/context.Rd0000644000175100001440000000106112606743734014670 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/context.r \name{context} \alias{context} \title{Describe the context of a set of tests.} \usage{ context(desc) } \arguments{ \item{desc}{description of context. Should start with a capital letter.} } \description{ A context defines a set of tests that test related functionality. Usually you will have one context per file, but you may have multiple contexts in a single file if you so choose. } \examples{ context("String processing") context("Remote procedure calls") } testthat/man/testthat.Rd0000644000175100001440000000167712606743734015061 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-that.r \docType{package} \name{testthat} \alias{testthat} \alias{testthat-package} \title{R package to make testing fun!} \description{ Try the example below. Have a look at the references and learn more from function documentation such as \code{\link{expect_that}}. } \details{ Software testing is important, but, in part because it is frustrating and boring, many of us avoid it. testthat is a new testing framework for R that is easy learn and use, and integrates with your existing workflow. } \examples{ library(testthat) a <- 9 expect_that(a, is_less_than(10)) expect_less_than(a, 10) } \references{ Wickham, H (2011). testthat: Get Started with Testing. \strong{The R Journal} \emph{3/1} 5-10. \url{http://journal.r-project.org/archive/2011-1/RJournal_2011-1_Wickham.pdf} \url{https://github.com/hadley/testthat} \url{http://adv-r.had.co.nz/Testing.html} } testthat/man/safe_digest.Rd0000644000175100001440000000072712606743734015471 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/watcher.r \name{safe_digest} \alias{safe_digest} \title{Compute a digest of a filename, returning NA if the file doesn't exist.} \usage{ safe_digest(path) } \arguments{ \item{filename}{filename to compute digest on} } \value{ a digest of the file, or NA if it doesn't exist. } \description{ Compute a digest of a filename, returning NA if the file doesn't exist. } \keyword{internal} testthat/man/not.Rd0000644000175100001440000000112412606743734014004 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expect-that.r \name{not} \alias{not} \title{Negate an expectation} \usage{ not(f) } \arguments{ \item{f}{an existing expectation function} } \description{ This negates an expectation, making it possible to express that you want the opposite of a standard expectation. This function is soft-deprecated and will be removed in a future version. } \examples{ x <- 1 expect_that(x, equals(1)) expect_that(x, not(equals(2))) \dontrun{ expect_that(x, equals(2)) expect_that(x, not(equals(1))) } } \keyword{internal} testthat/man/reporter-accessors.Rd0000644000175100001440000000112112606743734017026 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-zzz.r \name{reporter-accessors} \alias{get_reporter} \alias{reporter-accessors} \alias{set_reporter} \alias{with_reporter} \title{Get/set reporter; execute code in specified reporter.} \usage{ set_reporter(reporter) get_reporter() with_reporter(reporter, code) } \arguments{ \item{reporter}{test reporter to use} \item{code}{code block to execute} } \description{ Changes global reporter to that specified, runs code and the returns global reporter back to previous value. } \keyword{internal} testthat/man/source_dir.Rd0000644000175100001440000000107312606743734015345 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-files.r \name{source_dir} \alias{source_dir} \title{Load all source files in a directory.} \usage{ source_dir(path, pattern = "\\\\.[rR]$", env = test_env(), chdir = TRUE) } \arguments{ \item{path}{path to tests} \item{pattern}{regular expression used to filter files} \item{env}{environment in which to store results} \item{chdir}{change working directory to path?} } \description{ The expectation is that the files can be sourced in alphabetical order. } \keyword{internal} testthat/man/MultiReporter-class.Rd0000644000175100001440000000074312606743734017132 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/reporter-multi.r \docType{class} \name{MultiReporter-class} \alias{MultiReporter} \alias{MultiReporter-class} \title{Multi reporter: combine several reporters in one.} \arguments{ \item{...}{Arguments used to initialise class} } \description{ This reporter is useful to use several reporters at the same time, e.g. adding a custom reporter without removing the current one. } \keyword{debugging} testthat/man/watch.Rd0000644000175100001440000000175012606743734014317 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/watcher.r \name{watch} \alias{watch} \title{Watch a directory for changes (additions, deletions & modifications).} \usage{ watch(path, callback, pattern = NULL, hash = TRUE) } \arguments{ \item{path}{character vector of paths to watch. Omit trailing backslash.} \item{callback}{function called everytime a change occurs. It should have three parameters: added, deleted, modified, and should return TRUE to keep watching, or FALSE to stop.} \item{pattern}{file pattern passed to \code{\link{dir}}} \item{hash}{hashes are more accurate at detecting changes, but are slower for large files. When FALSE, uses modification time stamps} } \description{ This is used to power the \code{\link{auto_test}} and \code{\link{auto_test_package}} functions which are used to rerun tests whenever source code changes. } \details{ Use Ctrl + break (windows), Esc (mac gui) or Ctrl + C (command line) to stop the watcher. } testthat/man/source_test_helpers.Rd0000644000175100001440000000102412606743734017264 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-files.r \name{source_test_helpers} \alias{source_test_helpers} \title{Source the helper scripts if any.} \usage{ source_test_helpers(path, env = globalenv()) } \arguments{ \item{path}{path to tests} \item{env}{environment in which to execute test suite.} } \description{ Helper scripts are R scripts accompanying test scripts but prefixed by \code{helper}. These scripts are sourced only one time in test environment. } \keyword{internal} testthat/man/testthat_results.Rd0000644000175100001440000000106212606743734016626 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-results.r \name{testthat_results} \alias{testthat_results} \title{Create a `testthat_results` object from the test results as stored in the ListReporter results field.} \usage{ testthat_results(results) } \arguments{ \item{results}{a list as stored in ListReporter} } \value{ its list argument as a `testthat_results` object } \description{ Create a `testthat_results` object from the test results as stored in the ListReporter results field. } \seealso{ ListReporter } testthat/man/skip.Rd0000644000175100001440000000243712606743734014162 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/utils.r \name{skip} \alias{skip} \alias{skip_if_not_installed} \alias{skip_on_appveyor} \alias{skip_on_cran} \alias{skip_on_os} \alias{skip_on_travis} \title{Skip a test.} \usage{ skip(message) skip_if_not_installed(pkg) skip_on_cran() skip_on_os(os) skip_on_travis() skip_on_appveyor() } \arguments{ \item{message}{A message describing why the test was skipped.} \item{pkg}{Name of package to check for} \item{os}{Character vector of system names. Supported values are \code{"windows"}, \code{"mac"}, \code{"linux"} and \code{"solaris"}.} } \description{ This function allows you to skip a test if it's not currently available. This will produce an informative message, but will not cause the test suite to fail. } \section{Helpers}{ \code{skip_on_cran()} skips tests on CRAN, using the \code{NOT_CRAN} environment variable set by devtools. \code{skip_on_travis()} skips tests on travis by inspecting the \code{TRAVIS} environment variable. \code{skip_on_appveyor()} skips tests on appveyor by inspecting the \code{APPVEYOR} environment variable. \code{skip_if_not_installed()} skips a tests if a package is not installed (useful for suggested packages). } \examples{ if (FALSE) skip("No internet connection") } testthat/man/matching-expectations.Rd0000644000175100001440000000713412606743734017511 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations-matches.R \name{matching-expectations} \alias{expect_error} \alias{expect_match} \alias{expect_message} \alias{expect_output} \alias{expect_warning} \alias{matching-expectations} \title{Expectation: does string/output/message/warning/error match a regular expression?} \usage{ expect_match(object, regexp, ..., all = TRUE, info = NULL, label = NULL) expect_output(object, regexp, ..., info = NULL, label = NULL) expect_error(object, regexp = NULL, ..., info = NULL, label = NULL) expect_warning(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL) expect_message(object, regexp = NULL, ..., all = FALSE, info = NULL, label = NULL) } \arguments{ \item{object}{object to test} \item{regexp}{regular expression to test against. If omitted, just asserts that code produces some output, messsage, warning or error. Alternatively, you can specify \code{NA} to indicate that there should be no output, messages, warnings or errors.} \item{...}{Additional arguments passed on to \code{\link{grepl}}, e.g. \code{ignore.case} or \code{fixed}.} \item{all}{should all elements of actual value match \code{regexp} (TRUE), or does only one need to match (FALSE)} \item{info}{extra information to be included in the message (useful when writing tests in loops).} \item{label}{object label. When \code{NULL}, computed from deparsed object.} } \description{ Expectation: does string/output/message/warning/error match a regular expression? } \examples{ expect_match("Testing is fun", "fun") expect_match("Testing is fun", "f.n") # Output -------------------------------------------------------------------- str(mtcars) expect_output(str(mtcars), "32 obs") expect_output(str(mtcars), "11 variables") # You can use the arguments of grepl to control the matching expect_output(str(mtcars), "11 VARIABLES", ignore.case = TRUE) expect_output(str(mtcars), "$ mpg", fixed = TRUE) # Messages ------------------------------------------------------------------ f <- function(x) { if (x < 0) message("*x* is already negative") -x } expect_message(f(-1)) expect_message(f(-1), "already negative") expect_message(f(1), NA) # You can use the arguments of grepl to control the matching expect_message(f(-1), "*x*", fixed = TRUE) expect_message(f(-1), "NEGATIVE", ignore.case = TRUE) # Warnings -------------------------------------------------------------------- f <- function(x) { if (x < 0) warning("*x* is already negative") -x } expect_warning(f(-1)) expect_warning(f(-1), "already negative") expect_warning(f(1), NA) # You can use the arguments of grepl to control the matching expect_warning(f(-1), "*x*", fixed = TRUE) expect_warning(f(-1), "NEGATIVE", ignore.case = TRUE) # Errors -------------------------------------------------------------------- f <- function() stop("My error!") expect_error(f()) expect_error(f(), "My error!") # You can use the arguments of grepl to control the matching expect_error(f(), "my error!", ignore.case = TRUE) } \seealso{ Other expectations: \code{\link{equivalence}}, \code{\link{expect_equal}}, \code{\link{expect_equivalent}}, \code{\link{expect_identical}}; \code{\link{expect-compare}}, \code{\link{expect_gt}}, \code{\link{expect_gte}}, \code{\link{expect_less_than}}, \code{\link{expect_lt}}, \code{\link{expect_lte}}, \code{\link{expect_more_than}}; \code{\link{expect_equal_to_reference}}; \code{\link{expect_false}}, \code{\link{expect_true}}; \code{\link{expect_is}}; \code{\link{expect_named}}; \code{\link{expect_null}}; \code{\link{expect_silent}}; \code{\link{takes_less_than}} } testthat/man/find_test_scripts.Rd0000644000175100001440000000107312606743734016735 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/test-files.r \name{find_test_scripts} \alias{find_test_scripts} \title{Find the test files.} \usage{ find_test_scripts(path, filter = NULL, invert = FALSE, ...) } \arguments{ \item{path}{path to tests} \item{filter}{cf \code{\link{test_dir}}} \item{invert}{If \sQuote{TRUE} return files which do \emph{not} match.} \item{...}{Additional arguments passed to \code{grepl} to control filtering.} } \value{ the test file paths } \description{ Find the test files. } \keyword{internal} testthat/man/oldskool.Rd0000644000175100001440000000273612606743734015044 0ustar hornikusers% Generated by roxygen2 (4.1.1): do not edit by hand % Please edit documentation in R/expectations-equality.R, R/expectations-matches.R, R/expectations-old.R, R/expectations.r \name{equals} \alias{equals} \alias{equals_reference} \alias{gives_warning} \alias{has_names} \alias{is_a} \alias{is_equivalent_to} \alias{is_false} \alias{is_identical_to} \alias{is_less_than} \alias{is_more_than} \alias{is_null} \alias{is_true} \alias{matches} \alias{oldskool} \alias{prints_text} \alias{shows_message} \alias{throws_error} \title{Old-style expectations.} \usage{ equals(expected, label = NULL, ...) is_equivalent_to(expected, label = NULL) is_identical_to(expected, label = NULL) equals_reference(file, label = NULL, ...) matches(regexp, all = TRUE, ...) prints_text(regexp, ...) throws_error(regexp = NULL, ...) gives_warning(regexp = NULL, all = FALSE, ...) shows_message(regexp = NULL, all = FALSE, ...) is_a(class) is_true() is_false() is_null() has_names(expected, ignore.order = FALSE, ignore.case = FALSE) is_less_than(expected, label = NULL, ...) is_more_than(expected, label = NULL, ...) } \description{ Initial testthat used a style of testing that looked like \code{expect_that(a, equals(b)))} this allowed expectations to read like English sentences, but was verbose and a bit too cutesy. This style will continue to work but has been soft-deprecated - it is no longer documented, and new expectations will only use the new style \code{expect_equal(a, b)}. } \keyword{internal} testthat/LICENSE0000644000175100001440000000007212406322470013134 0ustar hornikusersYEAR: 2013-2014 COPYRIGHT HOLDER: Hadley Wickham; RStudio