diff --git a/R/CBA.R b/R/CBA.R
index e28d2e3..9e609df 100644
--- a/R/CBA.R
+++ b/R/CBA.R
@@ -17,7 +17,7 @@
 #' of form \code{class ~ .} or \code{class ~ predictor1 + predictor2}.
 #' @param data A data.frame or a transaction set containing the training data.
 #' Data frames are automatically discretized and converted to transactions.
-#' @param pruning Pruning strategy used: "M1" or "M2".
+#' @param pruning Pruning strategy used: "M1" or "M2". Set to NULL to skip the pruning step.
 #' @param parameter,control Optional parameter and control lists for apriori.
 #' @param balanceSupport balanceSupport parameter passed to
 #' \code{\link{mineCARs}} function.
@@ -79,8 +79,10 @@ CBA <- function(formula, data, pruning = "M1",
     verbose = verbose, ...)
 
   if(verbose) cat("\nPruning CARs...\n")
-  if(pruning == "M1") rulebase <- pruneCBA_M1(formula, rulebase, trans)
-  else rulebase <- pruneCBA_M2(formula, rulebase, trans)
+  if(!is.null(pruning)) {
+    if(pruning == "M1") rulebase <- pruneCBA_M1(formula, rulebase, trans)
+    else rulebase <- pruneCBA_M2(formula, rulebase, trans)
+  }
 
   if(verbose) cat("CARs left:", length(rulebase), "\n")
 
diff --git a/man/CBA.Rd b/man/CBA.Rd
index 9eac4a9..c8dfe18 100644
--- a/man/CBA.Rd
+++ b/man/CBA.Rd
@@ -30,7 +30,7 @@ of form \code{class ~ .} or \code{class ~ predictor1 + predictor2}.}
 \item{data}{A data.frame or a transaction set containing the training data.
 Data frames are automatically discretized and converted to transactions.}
 
-\item{pruning}{Pruning strategy used: "M1" or "M2".}
+\item{pruning}{Pruning strategy used: "M1" or "M2". Set to NULL to skip the pruning step.}
 
 \item{parameter, control}{Optional parameter and control lists for apriori.}
 
diff --git a/tests/testthat/test-CBA.R b/tests/testthat/test-CBA.R
index cb8cce0..f4618ad 100644
--- a/tests/testthat/test-CBA.R
+++ b/tests/testthat/test-CBA.R
@@ -1,15 +1,30 @@
 library("testthat")
 library("arulesCBA")
-data("iris")
 
 context("CBA")
 
-cba_classifier <- CBA(Species ~ ., iris, supp = 0.05, conf = 0.9, pruning = "M1")
+data("iris")
+formula <- Species ~ .
+
+trans <- prepareTransactions(formula, iris, disc.method = "mdlp")
+rulebase <- mineCARs(
+  formula = formula,
+  transactions = trans,
+  supp = 0.05,
+  conf = 0.9
+)
+n_rules <- length(rulebase)
+
+cba_no_pruning <- CBA(formula, iris, supp = 0.05, conf = 0.9, pruning = NULL)
+expect_equal(length(rules(cba_no_pruning)), n_rules)
+
+cba_classifier <- CBA(formula, iris, supp = 0.05, conf = 0.9, pruning = "M1")
 expect_equal(length(rules(cba_classifier)), 8L)
 
 results <- predict(cba_classifier, iris)
-expect_equal(results[1], factor("setosa",
-  levels = c("setosa", "versicolor", "virginica")))
+expect_equal(
+  results[1], factor("setosa", levels = c("setosa", "versicolor", "virginica"))
+)
 
 results <- predict(cba_classifier, head(iris, n = 5))
 expect_equal(length(results), 5L)
@@ -25,7 +40,7 @@ results <- predict(cba_classifier, head(iris, n = 5))
 expect_equal(length(results), 5L)
 
 # FIXME: We need to check what the output of M2 should be
-cba_classifier_M2 <- CBA(Species ~ ., iris, supp = 0.05, conf = 0.9, pruning = "M2")
+cba_classifier_M2 <- CBA(formula, iris, supp = 0.05, conf = 0.9, pruning = "M2")
 
 # FIXME: there is a bug in totalError calculation in M2
 #expect_equal(length(rules(cba_classifier_M2)), 8L)
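
A minimal usage sketch of the new pruning = NULL option, assuming only the arulesCBA API already exercised by the tests above (the variable names classifier_all and classifier_m1 are illustrative, and the exact number of mined CARs depends on the discretization):

library("arulesCBA")
data("iris")

# With pruning = NULL the rule base is left unpruned, so the classifier
# keeps every CAR that mineCARs() produces for the given support/confidence.
classifier_all <- CBA(Species ~ ., iris, supp = 0.05, conf = 0.9, pruning = NULL)
length(rules(classifier_all))  # all mined CARs

# The default M1 pruning reduces this to a compact rule base
# (8 rules for iris in the test above).
classifier_m1 <- CBA(Species ~ ., iris, supp = 0.05, conf = 0.9, pruning = "M1")
length(rules(classifier_m1))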