From 5ce0bdf2256f2ee8d82834cb63c417699c8ffc51 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 12:51:16 +0000 Subject: [PATCH 01/49] code modification to jjcsformats --- R/jjcsformats.R | 476 ++++++++++++++++++++++-------------------------- 1 file changed, 215 insertions(+), 261 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 428333e5..73c6985b 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -1,88 +1,89 @@ -#' @title Function factory for xx style formatting -#' @description A function factory to generate formatting functions for value -#' formatting that support the xx style format and control the rounding method. +#' @name jjcsformat_xx +#' @title Utility for specifying custom formats +#' +#' @description #' -#' @param roundmethod (`string`)\cr choice of rounding methods. Options are: -#' * `sas`: the underlying rounding method is `tidytlg::roundSAS`, where \cr -#' roundSAS comes from this Stack Overflow post https://stackoverflow.com/questions/12688717/round-up-from-5 -#' * `iec`: the underlying rounding method is `round` +#' Utility for specifying custom formats that can be used as a format in `formatters::format_value` #' -#' @param na_str_dflt (`character`)\cr Character to represent NA value -#' @param replace_na_dflt (`logical(1)`)\cr Should an `na_string` of "NA" within +#' @param str The formatting that is required specified as a text string, eg "xx.xx" +#' @param na_str_dflt Character to represent NA value +#' @param replace_na_dflt logical(1). Should an `na_string` of "NA" within #' the formatters framework be overridden by `na_str_default`? Defaults to #' `TRUE`, as a way to have a different default na string behavior from the #' base `formatters` framework. 
-#' @return `format_xx_fct()` format function that can be used in rtables formatting calls +#' @return Either a supported format string, or a formatting function that can be +#' used as format in `formatters::format_value` +#' @family JJCS formatting functions +#' @rdname jjcsformat_xx #' @export -#' -#' @family JJCS formats #' @examples -#' jjcsformat_xx_SAS <- format_xx_fct(roundmethod = "sas") -#' jjcsformat_xx <- jjcsformat_xx_SAS -#' rcell(c(1.453), jjcsformat_xx("xx.xx")) -#' rcell(c(), jjcsformat_xx("xx.xx")) -#' rcell(c(1.453, 2.45638), jjcsformat_xx("xx.xx (xx.xxx)")) -format_xx_fct <- function(roundmethod = c("sas", "iec"), na_str_dflt = "NE", - replace_na_dflt = TRUE) { - roundmethod <- match.arg(roundmethod) - - if (roundmethod == "sas") { - roundfunc <- tidytlg::roundSAS - } else { - roundfunc <- round - } - - fnct <- function(str, na_str = na_str_dflt) { - if (grepl("xxx.", str, fixed = TRUE)) { - stop( - "Error: jjcs_format_xx: do not use xxx. in input str, replace by xx. instead." - ) +#' value <- c(1.65, 8.645) +#' fmt <- jjcsformat_xx("xx.x") +#' is.function(fmt) +#' fmt +#' format_value(value[1], fmt, round_type = "sas") +#' format_value(value[1], fmt, round_type = "iec") +#' fmt(value[1]) +#' +#' fmt2 <- jjcsformat_xx("xx.x (xx.xxx)") +#' is.function(fmt2) +#' value <- c(1.65, 8.645) +#' format_value(value, fmt2, round_type = "sas") +#' format_value(value, fmt2, round_type = "iec") +#' #only possible when resulting format is a function +#' fmt2(value, round_type = "sas") +#' +#' value <- c(1.65, NA) +#' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) +#' fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) + + +jjcsformat_xx <- function(str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE) { + + if (grepl("xxx.", str, fixed = TRUE)) { + stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. 
instead.") } + + if (is_valid_format(str)) { + rtable_format <- str + } else { + if (!grepl("xx", str, fixed = TRUE)) { - stop("Error: jjcs_format_xx: input str must contain xx.") + stop("Error: jjcsformat_xx input str must contain xx.") } - positions <- gregexpr( - pattern = "xx\\.?x*", - text = str, - perl = TRUE - ) + positions <- gregexpr(pattern = "xx\\.?x*", + text = str, + perl = TRUE) x_positions <- regmatches(x = str, m = positions)[[1]] - ### str is splitted into pieces as xx. xx xx.xxx - ### xx is no rounding - ### xx. rounding to integer - ### xx.x rounding to 1 decimal, etc - - no_round <- function(x, na_str = na_str_dflt) { - if (is.na(x)) { - return(na_str) - } else { - return(x) - } - } - - roundings <- lapply(X = x_positions, function(x) { - y <- strsplit(split = "\\.", x = x)[[1]] - ### "xx.x" will result in c("xx","x") - ### "xx." will result in "xx" - ### "xx" will remain "xx" - - if (x == "xx") { - rounding <- no_round - } else { - rounding <- function(x, na_str = na_str_dflt) { - if (is.na(x)) { - return(na_str) - } else { - format( - roundfunc(x, digits = ifelse(length(y) > 1, nchar(y[2]), 0)), - nsmall = ifelse(length(y) > 1, nchar(y[2]), 0) - ) - } + + roundings <- lapply(X = x_positions, function(fmt) { + rounding <- function(x, + na_str, + round_type) { + if (fmt %in% formatters:::formats_1d) { + res <- format_value(x, + fmt, + na_str = na_str[1], + round_type = round_type) + } else if (fmt %in% c("xx.xxxxx", "xx.xxxxxx", "xx.xxxxxxx")) { + # p-value fmt sometimes might need more digits + d <- nchar(sub(".*\\.", "", fmt)) + res <- round_fmt(x, digits = d, round_type = round_type, na_str = na_str[1]) } + res } return(rounding) }) - rtable_format <- function(x, output, na_str = na_str_dflt) { + + rtable_format <- + function( + x, + output, + round_type = c("sas", "iec"), + na_str = na_str_dflt) { if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) 
na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] @@ -91,38 +92,29 @@ format_xx_fct <- function(roundmethod = c("sas", "iec"), na_str_dflt = "NE", return(NULL) } else if (!length(positions[[1]]) == length(x)) { stop( - "Error: input str in call to jjcs_format_xx must contain same number of xx as the number of stats." + "Error: input str in call to jjcsformat_xx must contain same number of xx as the number of stats." ) } - - values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str) fun(y, na_str = na_str)) + + round_type <- match.arg(round_type) + + values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str, output) + fun(y, na_str = na_str, round_type = round_type)) + regmatches(x = str, m = positions)[[1]] <- values return(str) } + return(rtable_format) - } - return(fnct) + } + } -jjcsformat_xx_SAS <- format_xx_fct(roundmethod = "sas") -jjcsformat_xx_R <- format_xx_fct(roundmethod = "iec") - -### if we ever decide to switch rounding method, we just have to update jjcsformat_xx here -#' @title Formatting of values -#' @name jjcsformat_xx -#' @description jjcs formatting function -#' @param str (`character`)\cr the formatting that is required specified as a text string, eg "xx.xx" -#' @param na_str (`character`)\cr Na string that will be passed from `formatters` into -#' our formatting functions. -#' @return a formatting function with `"sas"`-style rounding. -#' @export -jjcsformat_xx <- jjcsformat_xx_SAS - -#' @name count_fraction -#' @title Formatting count and fraction values +#' @name count and fraction related formatting functions +#' @title Formatting functions for count and fraction, and for count denominator and fraction values #' #' @description #' @@ -130,191 +122,146 @@ jjcsformat_xx <- jjcsformat_xx_SAS #' consideration when count is 0, or fraction is 1. 
#' \cr See also: [tern::format_count_fraction_fixed_dp()] #' -#' @inheritParams format_xx_fct #' @param x (`numeric vector`)\cr Vector with elements `num` and `fraction` or `num`, `denom` and `fraction`. #' @param d (`numeric(1)`)\cr Number of digits to round fraction to (default = 1) #' @param ... Additional arguments passed to other methods. -#' @return A string in the format `count / denom (ratio percent)`. If `count` +#' @return A formatting function to format input into string in the format `count / denom (ratio percent)`. If `count` #' is 0, the format is `0`. If fraction is >0.99, the format is #' `count / denom (>99.9 percent)` -#' @family JJCS formats +#' @family JJCS formatting functions #' @rdname count_fraction #' @export -#' @examples -#' jjcsformat_count_fraction(c(7, 0.7)) -#' jjcsformat_count_fraction(c(70000, 0.9999999)) -#' jjcsformat_count_fraction(c(70000, 1)) -jjcsformat_count_fraction <- function( + +jjcsformat_cnt_den_fract_fct <- function(d = 1, + type = c("count_fraction", "count_denom_fraction", "fraction_count_denom"), + verbose = FALSE + ){ + type <- match.arg(type) + + function( x, - d = 1, - roundmethod = c("sas", "iec"), + round_type = c("sas", "iec"), + output, ...) 
{ - roundmethod <- match.arg(roundmethod) - attr(x, "label") <- NULL - if (any(is.na(x))) { - return("-") - } - - checkmate::assert_vector(x) - checkmate::assert_integerish(x[1]) - assert_proportion_value( - x[2], - include_boundaries = TRUE - ) - - fraction <- x[2] - - if (isTRUE(all.equal(fraction, 1))) fraction <- 1 - - if (roundmethod == "sas") { - fmtpct <- format(tidytlg::roundSAS(fraction * 100, d), nsmall = d) - } else { - fmtpct <- format(round(fraction * 100, d), nsmall = d) - } - - result <- if (x[1] == 0) { - "0" - } else if (fraction == 1) { - ## per conventions still report as 100.0% - paste0(x[1], " (100.0%)") - } else if (fmtpct == format(0, nsmall = d)) { - # else if (100*x[2] < 10**(-d)) { - ### example pct = 0.09999 ### <0.1% (even if fmtpct == 0.1, - # but the actual value of pct <0.1) - paste0(x[1], " (<", 10**(-d), "%)") - } else if (fmtpct == format(100, nsmall = d)) { - # else if (100*x[2] > 100-10**(-d)) { - ### example pct = 99.90001 ### >99.9% (even if fmtpct == 99.9, - # but the actual value of pct >99.9) - paste0(x[1], " (>", 100 - 10**(-d), "%)") - } else { - paste0(x[1], " (", fmtpct, "%)") - } + attr(x, "label") <- NULL + if (any(is.na(x))) { + return("-") + } + + round_type <- match.arg(round_type) + + checkmate::assert_vector(x) + count <- x[1] + checkmate::assert_integerish(count) + + fraction <- switch(type, + "count_fraction" = x[2], + "count_denom_fraction" = x[3], + "fraction_count_denom" = x[3]) + + + assert_proportion_value( + fraction, + include_boundaries = TRUE + ) + + if (isTRUE(all.equal(fraction, 1))) fraction <- 1 + + if (type == "count_fraction") fmt_cd <- format_value(x = count, format = "xx") + if (type %in% c("count_denom_fraction", "fraction_count_denom")) { + denom <- x[2] + checkmate::assert_integerish(denom) + fmt_cd <- paste0(count, "/", denom) + } + + if (verbose) print(paste0("round_type used: ", round_type)) + + fmtpct <- format_value(100*fraction, + format = paste0("xx.", strrep("x", times = d)), + output 
= "ascii", + round_type = round_type) + + + if (type %in% c("count_fraction", "count_denom_fraction")){ + result <- if (count == 0) { + if (type == "count_fraction") "0" + else paste0(fmt_cd, " (", fmtpct, "%)") + } else if (fraction == 1) { + ## per conventions still report as 100.0% + paste0(fmt_cd, " (100.0%)") + } else if (fmtpct == format(0, nsmall = d)) { + # else if (100*x[2] < 10**(-d)) { + ### example pct = 0.09999 ### <0.1% (even if fmtpct == 0.1, + # but the actual value of pct <0.1) + paste0(fmt_cd, " (<", 10**(-d), "%)") + } else if (fmtpct == format(100, nsmall = d)) { + # else if (100*x[2] > 100-10**(-d)) { + ### example pct = 99.90001 ### >99.9% (even if fmtpct == 99.9, + # but the actual value of pct >99.9) + paste0(fmt_cd, " (>", 100 - 10**(-d), "%)") + } else { + paste0(fmt_cd, " (", fmtpct, "%)") + } + } else { + # type == fraction_count_denom + result <- if (count == 0) { + "0" + } else if (fraction == 1) { + ## per conventions still report as 100.0% + paste0("100.0% (", fmt_cd, ")") + } else if (fmtpct == format(0, nsmall = d)) { + # else if (100*x[2] < 10**(-d)) { + ### example pct = 0.09999 ### <0.1% (even if fmtpct == 0.1, + # but the actual value of pct <0.1) + paste0("<", 10**(-d), "%) (", fmt_cd, ")") + } else if (fmtpct == format(100, nsmall = d)) { + # else if (100*x[2] > 100-10**(-d)) { + ### example pct = 99.90001 ### >99.9% (even if fmtpct == 99.9, + # but the actual value of pct >99.9) + paste0(">", 100 - 10**(-d), "% (", fmt_cd, ")") + } else { + paste0(fmtpct, "% (", fmt_cd, ")") + } + } + return(result) + } } -#' @title Formatting count, denominator and fraction values. -#' -#' @inheritParams count_fraction -#' @param ... Additional arguments passed to other methods. +#' @rdname count_fraction #' @export -#' @rdname count_denom_fraction -#' @return `x`, formatted into a string with the appropriate -#' format and `d` digits of precision. 
#' @examples +#' +#' jjcsformat_count_fraction(c(7, 0.7)) +#' jjcsformat_count_fraction(c(70000, 70000 / 70001)) +#' jjcsformat_count_fraction(c(235, 235 / 235)) +#' fmt <- jjcsformat_cnt_den_fract_fct(type = "count_fraction", d = 2) +#' fmt(c(23, 23 / 235)) +jjcsformat_count_fraction <- jjcsformat_cnt_den_fract_fct(type = "count_fraction") + +#' @rdname count_fraction +#' @export +#' @examples +#' #' jjcsformat_count_denom_fraction(c(7, 10, 0.7)) #' jjcsformat_count_denom_fraction(c(70000, 70001, 70000 / 70001)) #' jjcsformat_count_denom_fraction(c(235, 235, 235 / 235)) -jjcsformat_count_denom_fraction <- function( - x, - d = 1, - roundmethod = c("sas", "iec"), - ...) { - roundmethod <- match.arg(roundmethod) - attr(x, "label") <- NULL - if (any(is.na(x))) { - return("-") - } - checkmate::assert_vector(x) - checkmate::assert_integerish(x[1]) - assert_proportion_value( - x[3], - include_boundaries = TRUE - ) - - fraction <- x[3] - if (x[2] == x[1]) fraction <- 1 - - fmt_x12 <- paste0(x[1], "/", x[2]) - - if (roundmethod == "sas") { - fmtpct <- format(tidytlg::roundSAS(fraction * 100, d), nsmall = d) - } else { - fmtpct <- format(round(fraction * 100, d), nsmall = d) - } +#' fmt <- jjcsformat_cnt_den_fract_fct(type = "count_denom_fraction", d = 2) +#' fmt(c(23, 235, 23 / 235)) +jjcsformat_count_denom_fraction <- jjcsformat_cnt_den_fract_fct(type = "count_denom_fraction") - result <- if (x[1] == 0) { - # "0" - # same as in general situation - paste0(fmt_x12, " (", fmtpct, "%)") - } else if (100 * fraction == 100) { - paste0(fmt_x12, " (100.0%)") - } else if (100 * fraction < 10**(-d)) { - ### example pct = 0.09999 ### <0.1% (even if fmtpct == 0.1, but the actual value of pct <0.1) - paste0(fmt_x12, " (<", 10**(-d), "%)") - } else if (100 * fraction > 100 - 10**(-d)) { - ### example pct = 99.90001 ### >99.9% (even if fmtpct == 99.9, but the actual value of pct >99.9) - paste0(fmt_x12, " (>", 100 - 10**(-d), "%)") - } else { - paste0(fmt_x12, " (", fmtpct, "%)") - } - 
return(result) -} - -#' @title Formatting fraction, count and denominator values. -#' -#' @details -#' Formats a 3-dimensional value such that percent values -#' near 0 or 100% are formatted as .e.g, `"<0.1%"` and -#' `">99.9%"`, where the cutoff is controlled by `d`, and -#' formatted as `"xx.x% (xx/xx)"` otherwise, with the -#' precision of the percent also controlled by `d`. -#' -#' @inheritParams count_fraction -#' @param ... Additional arguments passed to other methods. +#' @rdname count_fraction #' @export -#' @rdname fraction_count_denom -#' @return `x` formatted as a string with `d` digits of precision, -#' with special cased values as described in Details above. #' @examples +#' #' jjcsformat_fraction_count_denom(c(7, 10, 0.7)) #' jjcsformat_fraction_count_denom(c(70000, 70001, 70000 / 70001)) #' jjcsformat_fraction_count_denom(c(235, 235, 235 / 235)) -jjcsformat_fraction_count_denom <- function( - x, - d = 1, - roundmethod = c("sas", "iec"), - ...) { - roundmethod <- match.arg(roundmethod) - attr(x, "label") <- NULL - if (any(is.na(x))) { - return("-") - } - checkmate::assert_vector(x) - checkmate::assert_integerish(x[1]) - assert_proportion_value( - x[3], - include_boundaries = TRUE - ) +#' fmt <- jjcsformat_cnt_den_fract_fct(type = "fraction_count_denom", d = 2) +#' fmt(c(23, 235, 23 / 235)) +jjcsformat_fraction_count_denom <- jjcsformat_cnt_den_fract_fct(type = "fraction_count_denom") - fraction <- x[3] - if (x[2] == x[1]) fraction <- 1 - fmt_x12 <- paste0(x[1], "/", x[2]) - - if (roundmethod == "sas") { - fmtpct <- format(tidytlg::roundSAS(fraction * 100, d), nsmall = d) - } else { - fmtpct <- format(round(fraction * 100, d), nsmall = d) - } - - result <- if (x[1] == 0) { - # "0" - # same as in general situation - paste0("(", fmt_x12, ")") - } else if (100 * fraction == 100) { - paste0("100.0%", " (", fmt_x12, ")") - } else if (100 * fraction < 10**(-d)) { - ### example pct = 0.09999 ### <0.1% (even if fmtpct == 0.1, but the actual value of pct <0.1) 
- paste0("<", 10**(-d), "%", " (", fmt_x12, ")") - } else if (100 * fraction > 100 - 10**(-d)) { - ### example pct = 99.90001 ### >99.9% (even if fmtpct == 99.9, but the actual value of pct >99.9) - paste0(">", 100 - 10**(-d), "%", " (", fmt_x12, ")") - } else { - paste0(fmtpct, "%", " (", fmt_x12, ")") - } - return(result) -} #' @title Function factory for p-value formatting #' @@ -330,7 +277,7 @@ jjcsformat_fraction_count_denom <- function( #' For example, 0.0048 is not rounded to 0.005 but stays at 0.0048 if `alpha = 0.005` #' is set. #' -#' @family JJCS formats +#' @rdname jjcsformat_xx #' @export #' #' @examples @@ -346,27 +293,32 @@ jjcsformat_fraction_count_denom <- function( jjcsformat_pval_fct <- function(alpha = 0.05) { checkmate::assert_number(alpha, lower = 0, upper = 1) - function(x, ...) { + function(x, round_type = c("iec", "sas"), na_str, ...) { + round_type <- match.arg(round_type) checkmate::assert_number( x, lower = 0, upper = 1 + .Machine$double.eps, # Be a bit tolerant here. na.ok = TRUE ) + xx_format <- "xx.xxx" + if (!is.na(x) && alpha < 0.001 && alpha > 0) { + stop("jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001.") + } + if (is.na(x)) { - "NE" + format_value(x, jjcsformat_xx(xx_format), na_str = na_str) } else if (x < 0.001) { "<0.001" } else if (x > 0.999) { ">0.999" } else { - xx_format <- "xx.xxx" - res <- jjcsformat_xx(xx_format)(x) + res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) while (as.numeric(res) == alpha && x < alpha) { # Increase precision by 1 digit until the result # is different from threshold alpha. xx_format <- paste0(xx_format, "x") - res <- jjcsformat_xx(xx_format)(x) + res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) } res } @@ -388,9 +340,8 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { #' with the specified numeric format as `(min, max)`, and the `+` is appended #' to `min` or `max` if these have been censored. 
#' -#' @family JJCS formats #' @export -#' +#' @rdname jjcsformat_xx #' @examples #' my_range_format <- jjcsformat_range_fct("xx.xx") #' my_range_format(c(0.35235, 99.2342, 1, 0)) @@ -400,7 +351,8 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { jjcsformat_range_fct <- function(str) { format_xx <- jjcsformat_xx(str) - function(x, ...) { + function(x, output, round_type = c("iec", "sas"), ...) { + round_type <- match.arg(round_type) checkmate::assert_numeric( x, len = 4L, @@ -409,7 +361,9 @@ jjcsformat_range_fct <- function(str) { ) checkmate::assert_true(all(x[c(3, 4)] %in% c(0, 1))) - res <- vapply(x[c(1, 2)], format_xx, character(1)) + res <- vapply(x[c(1, 2)], FUN = function(x){ + format_value(x, format_xx, round_type = round_type) + }, character(1)) if (x[3] == 1) res[1] <- paste0(res[1], "+") if (x[4] == 1) res[2] <- paste0(res[2], "+") paste0("(", res[1], ", ", res[2], ")") From 81f9adc3185319c6cd5916cd14ecf6f3343b2cc4 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 12:52:02 +0000 Subject: [PATCH 02/49] updates to test scripts --- tests/testthat/sas_comparison/relative_risk.R | 2 +- tests/testthat/test-a_freq_resp_var_j.R | 14 +- .../testthat/test-a_summarize_aval_chg_diff.R | 4 +- tests/testthat/test-analyze_values.R | 2 +- tests/testthat/test-blank_line.R | 4 +- tests/testthat/test-cmp_functions.R | 2 +- tests/testthat/test-column_stats.R | 6 +- tests/testthat/test-colwidths.R | 2 +- tests/testthat/test-count_pct.R | 44 ++--- tests/testthat/test-count_pct_relrisk.R | 14 +- tests/testthat/test-coxph_hr.R | 4 +- tests/testthat/test-coxreg_multivar.R | 4 +- tests/testthat/test-event_free.R | 4 +- tests/testthat/test-get_ref_info.R | 8 +- tests/testthat/test-jjcs_num_formats.R | 179 ++++++++++-------- tests/testthat/test-jjcsformats.R | 43 +++-- tests/testthat/test-kaplan_meier.R | 4 +- tests/testthat/test-patyrs-eair100.R | 8 +- tests/testthat/test-proportions.R | 2 +- tests/testthat/test-pruning_functions.R | 20 +- 
tests/testthat/test-relative_risk.R | 2 +- tests/testthat/test-remove_col_count.R | 2 +- tests/testthat/test-resp01_functions.R | 4 +- tests/testthat/test-response_by_var.R | 10 +- tests/testthat/test-sorting_functions.R | 4 +- tests/testthat/test-split_functions.R | 14 +- tests/testthat/test-summarize_ancova.R | 2 +- 27 files changed, 224 insertions(+), 184 deletions(-) diff --git a/tests/testthat/sas_comparison/relative_risk.R b/tests/testthat/sas_comparison/relative_risk.R index 90f1a98a..56e4643f 100644 --- a/tests/testthat/sas_comparison/relative_risk.R +++ b/tests/testthat/sas_comparison/relative_risk.R @@ -292,4 +292,4 @@ lyt <- basic_table() |> ) ) -build_table(lyt, dat) +build_table(lyt, dat, round_type = "sas") diff --git a/tests/testthat/test-a_freq_resp_var_j.R b/tests/testthat/test-a_freq_resp_var_j.R index 8c1009eb..36e664fb 100644 --- a/tests/testthat/test-a_freq_resp_var_j.R +++ b/tests/testthat/test-a_freq_resp_var_j.R @@ -28,7 +28,7 @@ test_that("a_freq_resp_var_j works as expected with basic usage", { ) # Build the table - tbl <- build_table(lyt, adrs) + tbl <- build_table(lyt, adrs, round_type = "sas") expect_true(!is.null(tbl)) # Extract and check one cell for basic validation @@ -66,7 +66,7 @@ test_that("a_freq_resp_var_j works with factor responses", { ) # Should not throw an error - expect_no_error(build_table(lyt, adrs)) + expect_no_error(build_table(lyt, adrs, round_type = "sas")) }) test_that("a_freq_resp_var_j handles missing values correctly", { @@ -98,7 +98,7 @@ test_that("a_freq_resp_var_j handles missing values correctly", { ) # Should not throw an error - expect_no_error(suppressWarnings(build_table(lyt, adrs))) + expect_no_error(suppressWarnings(build_table(lyt, adrs, round_type = "sas"))) }) test_that("a_freq_resp_var_j errors on invalid responses", { @@ -130,7 +130,7 @@ test_that("a_freq_resp_var_j errors on invalid responses", { # Should throw an error about invalid response values expect_error( - build_table(lyt, adrs), + 
build_table(lyt, adrs, round_type = "sas"), "resp_var must contain only Y/N values" ) }) @@ -157,7 +157,7 @@ test_that("a_freq_resp_var_j errors when resp_var is null", { # Should throw an error about missing resp_var expect_error( - build_table(lyt, adrs), + build_table(lyt, adrs, round_type = "sas"), "resp_var cannot be NULL." ) }) @@ -191,7 +191,7 @@ test_that("a_freq_resp_var_j works with drop_levels parameter", { ) # Should not throw an error - expect_no_error(build_table(lyt, adrs)) + expect_no_error(build_table(lyt, adrs, round_type = "sas")) }) test_that("a_freq_resp_var_j works with riskdiff parameter", { @@ -223,5 +223,5 @@ test_that("a_freq_resp_var_j works with riskdiff parameter", { ) # Should not throw an error - expect_no_error(build_table(lyt, adrs)) + expect_no_error(build_table(lyt, adrs, round_type = "sas")) }) diff --git a/tests/testthat/test-a_summarize_aval_chg_diff.R b/tests/testthat/test-a_summarize_aval_chg_diff.R index 316b6501..3ad48ca4 100644 --- a/tests/testthat/test-a_summarize_aval_chg_diff.R +++ b/tests/testthat/test-a_summarize_aval_chg_diff.R @@ -142,7 +142,7 @@ test_that("a_summarize_aval_chg_diff_j works as expected", { ) # Test that the table builds without errors - result <- expect_no_error(build_table(lyt, ADEG)) + result <- expect_no_error(build_table(lyt, ADEG, round_type = "sas")) # Check that the result is a valid rtable expect_s4_class(result, "TableTree") @@ -279,7 +279,7 @@ test_that("a_summarize_aval_chg_diff_j works with ancova = TRUE", { ) # Test that the table builds without errors - result <- expect_no_error(build_table(lyt, ADEG)) + result <- expect_no_error(build_table(lyt, ADEG, round_type = "sas")) # Check that the result is a valid rtable expect_s4_class(result, "TableTree") diff --git a/tests/testthat/test-analyze_values.R b/tests/testthat/test-analyze_values.R index d6232cab..2e0bd0ba 100644 --- a/tests/testthat/test-analyze_values.R +++ b/tests/testthat/test-analyze_values.R @@ -33,7 +33,7 @@ 
test_that("analyze_values correctly modifies layout with proper formats", { ) # Build the table - tbl <- build_table(modified_lyt, mock_data) + tbl <- build_table(modified_lyt, mock_data, round_type = "sas") # Check that the table has the expected structure expect_true("AGE" %in% row.names(tbl)) diff --git a/tests/testthat/test-blank_line.R b/tests/testthat/test-blank_line.R index ff79bb15..f84d3ece 100644 --- a/tests/testthat/test-blank_line.R +++ b/tests/testthat/test-blank_line.R @@ -23,7 +23,7 @@ test_that("insert_blank_line works as expected", { }) # We don't want to see any warning about duplicate table names here. - tbl <- expect_silent(build_table(lyt, ADSL)) + tbl <- expect_silent(build_table(lyt, ADSL, round_type = "sas")) tbl # We expect 6 blank lines: after mean and range, for each of the @@ -56,7 +56,7 @@ test_that("insert_blank_line optionally uses custom table names", { }) # We don't want to see any warning about duplicate table names here. - tbl <- expect_silent(build_table(lyt, ADSL)) + tbl <- expect_silent(build_table(lyt, ADSL, round_type = "sas")) tbl # We expect 6 blank lines: after mean and range, for each of the diff --git a/tests/testthat/test-cmp_functions.R b/tests/testthat/test-cmp_functions.R index d553add9..a3154302 100644 --- a/tests/testthat/test-cmp_functions.R +++ b/tests/testthat/test-cmp_functions.R @@ -1,7 +1,7 @@ test_that("cmp_split_fun works as expected", { result <- basic_table() %>% split_cols_by("ID", split_fun = cmp_split_fun) |> - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") expect_snapshot(result) }) diff --git a/tests/testthat/test-column_stats.R b/tests/testthat/test-column_stats.R index 3a80c6a0..896ee04c 100644 --- a/tests/testthat/test-column_stats.R +++ b/tests/testthat/test-column_stats.R @@ -164,7 +164,7 @@ test_that("column_stats handles BASE variable correctly", { expect_equal(week1_base_mean, "20.00") }) -test_that("column_stats handles iec roundmethod correctly", { 
+test_that("column_stats handles iec round_type correctly", { # Create sample data df <- data.frame( AVISIT = c("Baseline (DB)", "Week 1", "Week 1", "Week 2", "Week 2"), @@ -180,7 +180,7 @@ test_that("column_stats handles iec roundmethod correctly", { statnm, visit, varnm, - roundmethod = "iec", + round_type = "iec", exclude_visits = "Baseline (DB)" ) } @@ -243,7 +243,7 @@ test_that("column_stats handles iec roundmethod correctly", { "Mean", "Week 1", "AVAL", - roundmethod = "sas", + round_type = "sas", exclude_visits = "Baseline (DB)" ) expect_equal(result_SAS, "25.35") diff --git a/tests/testthat/test-colwidths.R b/tests/testthat/test-colwidths.R index f3e98c7f..03644cec 100644 --- a/tests/testthat/test-colwidths.R +++ b/tests/testthat/test-colwidths.R @@ -37,7 +37,7 @@ lyt <- basic_table() |> show_labels = "visible" ) -tt <- build_table(lyt, ADSL) +tt <- build_table(lyt, ADSL, round_type = "sas") test_that("ttype_wrap_vec works as expected", { result <- ttype_wrap_vec(vec = c(1, 2, 3, 4) %>% as.character(), fontspec = fontspec, width = 2) diff --git a/tests/testthat/test-count_pct.R b/tests/testthat/test-count_pct.R index 4e316b38..58a47e4b 100644 --- a/tests/testthat/test-count_pct.R +++ b/tests/testthat/test-count_pct.R @@ -21,11 +21,11 @@ test_that("a_freq_j with val = NA and denom option", { ) # apply to adsl - tbl1 <- build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl) + tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1b) # scenario 1c : denom = .N_col, all values @@ -41,7 +41,7 @@ test_that("a_freq_j with val = NA and denom option", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl) + tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") expect_snapshot(tbl1c) }) @@ -60,11 +60,11 @@ test_that("a_freq_j with specific val (CHN) and denom option", { ) # apply to adsl - tbl1 <- build_table(lyt1, adsl) 
+ tbl1 <- build_table(lyt1, adsl, round_type = "sas") expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl) + tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1b) # scenario 1c : denom = .N_col, all values @@ -81,7 +81,7 @@ test_that("a_freq_j with specific val (CHN) and denom option", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl) + tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") expect_snapshot(tbl1c) }) @@ -100,11 +100,11 @@ test_that("a_freq_j with N_only", { ) # apply to adsl - tbl1 <- build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl) + tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1b) }) @@ -121,11 +121,11 @@ test_that("a_freq_j with TotCol_only", { analyze(vars = "COUNTRY", afun = a_freq_j, extra_args = extra_args_1) # apply to adsl - tbl1 <- build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl) + tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1b) }) @@ -157,7 +157,7 @@ test_that("a_freq_j as cfun", { ) # apply to adae - tbl1 <- build_table(lyt1, adae, adsl) + tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1) # scenario 2 : label using label_fstr method works @@ -178,7 +178,7 @@ test_that("a_freq_j as cfun", { ) # apply to adae - tbl2 <- build_table(lyt2, adae, adsl) + tbl2 <- build_table(lyt2, adae, adsl, round_type = "sas") expect_snapshot(tbl2) }) @@ -207,7 +207,7 @@ test_that("a_freq_j with label map", { ) # apply to adae - tbl1 <- build_table(lyt1, adae, adsl) + tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1) # scenario 2 : set row label using label parameter @@ -220,7 +220,7 @@ test_that("a_freq_j with label map", { extra_args = 
extra_args_2 ) - tbl2 <- build_table(lyt2, adae, adsl) + tbl2 <- build_table(lyt2, adae, adsl, round_type = "sas") expect_snapshot(tbl2) }) @@ -239,11 +239,11 @@ test_that("a_freq_j (old count_pats case)", { ) # apply to adsl - tbl1 <- build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl) + tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1b) # scenario 1c : denom = .N_col, all values @@ -259,7 +259,7 @@ test_that("a_freq_j (old count_pats case)", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl) + tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") expect_snapshot(tbl1c) }) @@ -278,12 +278,12 @@ test_that("a_freq_j with N_subgroup as denom", { ) # applied to adsl - tbl1 <- build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") expect_snapshot(tbl1) # applied to adae: when denomdf is not specified in the layout, # it will take Nsubgroup from df, not from alt_counts_df - tbl1b <- build_table(lyt1, adae, adsl) + tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") expect_snapshot(tbl1b) # applied to adae: with denomdf specified in layout @@ -303,7 +303,7 @@ test_that("a_freq_j with N_subgroup as denom", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl) + tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") expect_snapshot(tbl1c) }) @@ -435,7 +435,7 @@ test_that("a_freq_j with N_trt as denom - special situation", { ) ## main focus of this test is on the denominator - tbl <- build_table(lyt, adaeall, alt_counts_df = adsl_) + tbl <- build_table(lyt, adaeall, alt_counts_df = adsl_, round_type = "sas") expect_snapshot(tbl) ## additionally check if the denominator values are as expected @@ -461,7 +461,7 @@ test_that("a_freq_j with keep_levels (CHN, NGA) ", { analyze(vars = "COUNTRY", afun = a_freq_j, extra_args = extra_args_1) # apply to adsl - tbl1 <- 
build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") expect_snapshot(tbl1) # Also keep the original test to verify the specific row names diff --git a/tests/testthat/test-count_pct_relrisk.R b/tests/testthat/test-count_pct_relrisk.R index b3d51f41..04178c1a 100644 --- a/tests/testthat/test-count_pct_relrisk.R +++ b/tests/testthat/test-count_pct_relrisk.R @@ -103,7 +103,7 @@ test_that("a_freq_j with val = NA and denom option", { ) # apply to adsl - here it is not yet critical to set parameter denom - tbl1 <- build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") res1 <- cell_values(tbl1["CHN", "A: Drug X"]) res1_val <- unlist(unname(res1[[DrugX_column_val]])) res1_rr <- res1[[DrugX_column_rr]] %>% as.numeric() @@ -153,7 +153,7 @@ test_that("a_freq_j with val = NA and denom option", { afun = a_freq_j, extra_args = extra_args ) - tbl1b <- build_table(lyt1b, adae, adsl) + tbl1b <- build_table(lyt1b, adae, adsl, round_type = "sas") res1b <- cell_values(tbl1b["CHN", "A: Drug X"]) res1b_val <- unlist(unname(res1b[[DrugX_column_val]])) res1b_rr <- res1b[[DrugX_column_rr]] %>% as.numeric() @@ -202,7 +202,7 @@ test_that("a_freq_j with val = NA and denom option", { afun = a_freq_j, extra_args = extra_args ) - tbl1c <- build_table(lyt1c, adae, adsl) + tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") res1c <- cell_values(tbl1c["CHN", "A: Drug X"]) res1c_val <- unlist(unname(res1c[[DrugX_column_val]])) @@ -271,7 +271,7 @@ test_that("a_freq_j with risk difference method cmh", { extra_args = extra_args ) - tbl1d <- build_table(lyt1d, adae, adsl) + tbl1d <- build_table(lyt1d, adae, adsl, round_type = "sas") res1d <- cell_values(tbl1d["CHN", "A: Drug X"]) res1d_val <- unlist(unname(res1d[[DrugX_column_val]])) @@ -373,7 +373,7 @@ test_that("a_freq_j with N_subgroup as denom", { ) # applied to adsl - tbl1 <- build_table(lyt1, adsl) + tbl1 <- build_table(lyt1, adsl, round_type = "sas") tbl1x <- tbl1[ c("SEX", "F", "COUNTRY", 
"count_unique_denom_fraction.CHN"), seq_len(ncol(tbl1)) @@ -421,7 +421,7 @@ test_that("a_freq_j with N_subgroup as denom", { # applied to adae: HERE, when denomdf is not specified in the layout, # it will take Nsubgroup from df, not from alt_counts_df - tbl1b <- build_table(lyt1, adae, adsl) + tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") tbl1bx <- tbl1b[ c("SEX", "F", "COUNTRY", "count_unique_denom_fraction.CHN"), seq_len(ncol(tbl1b)) @@ -490,7 +490,7 @@ test_that("a_freq_j with N_subgroup as denom", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl) + tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") tbl1cx <- tbl1c[ c("SEX", "F", "COUNTRY", "count_unique_denom_fraction.CHN"), seq_len(ncol(tbl1c)) diff --git a/tests/testthat/test-coxph_hr.R b/tests/testthat/test-coxph_hr.R index 51399010..a2a233c0 100644 --- a/tests/testthat/test-coxph_hr.R +++ b/tests/testthat/test-coxph_hr.R @@ -70,7 +70,7 @@ test_that("a_coxph_hr works with custom arguments and stratification factors", { .stats = c("hr_ci_3d", "pvalue") ) ) %>% - build_table(df = adtte_f) + build_table(df = adtte_f, round_type = "sas") res <- expect_silent(result) expect_snapshot(res) @@ -101,7 +101,7 @@ test_that("a_coxph_hr works with stratification factors for Log-Rank test", { .stats = c("hr_ci_3d", "pvalue") ) ) %>% - build_table(df = adtte_f) + build_table(df = adtte_f, round_type = "sas") res <- expect_silent(result) expect_snapshot(res) diff --git a/tests/testthat/test-coxreg_multivar.R b/tests/testthat/test-coxreg_multivar.R index 2def990d..d553b117 100644 --- a/tests/testthat/test-coxreg_multivar.R +++ b/tests/testthat/test-coxreg_multivar.R @@ -110,13 +110,13 @@ test_that("summarize_coxreg_multivar works as expected with custom options", { ties = "breslow" ), formats = list( - coef_se = jjcsformat_xx("xx. 
(xx.)"), + coef_se = jjcsformat_xx("xx.x (xx.x)"), hr_est = jjcsformat_xx("xx.xxxx"), hr_ci = jjcsformat_xx("(xx.x, xx.x)"), pval = jjcsformat_pval_fct(0.1) ) ) - result <- expect_silent(build_table(lyt, anl)) + result <- expect_silent(build_table(lyt, anl, round_type = "sas")) expect_snapshot(result) }) diff --git a/tests/testthat/test-event_free.R b/tests/testthat/test-event_free.R index cbc36fc0..28dc561a 100644 --- a/tests/testthat/test-event_free.R +++ b/tests/testthat/test-event_free.R @@ -65,7 +65,7 @@ test_that("a_event_free works with default arguments in a table layout", { ) ) } - result <- build_table(lyt, df = adtte_f) + result <- build_table(lyt, df = adtte_f, round_type = "sas") res <- expect_silent(result) expect_snapshot(res) @@ -103,7 +103,7 @@ test_that("a_event_free works with customized arguments in a table layout", { ) ) } - result <- build_table(lyt, df = adtte_f) + result <- build_table(lyt, df = adtte_f, round_type = "sas") res <- expect_silent(result) expect_snapshot(res) diff --git a/tests/testthat/test-get_ref_info.R b/tests/testthat/test-get_ref_info.R index 05cb494e..a7bbd602 100644 --- a/tests/testthat/test-get_ref_info.R +++ b/tests/testthat/test-get_ref_info.R @@ -58,7 +58,7 @@ test_that("get_ref_info works with a df analysis function", { extra_args = list(ref_path = ref_path), afun = result_afun ) - result <- build_table(lyt, dm) + result <- build_table(lyt, dm, round_type = "sas") expect_snapshot(result) # Compare with non-hierarchical layout. 
@@ -69,7 +69,7 @@ test_that("get_ref_info works with a df analysis function", { extra_args = list(ref_path = ref_path), afun = standard_afun ) - std_result <- build_table(std_lyt, dm) + std_result <- build_table(std_lyt, dm, round_type = "sas") expect_snapshot(std_result) }) @@ -120,7 +120,7 @@ test_that("get_ref_info works with a vector analysis function", { extra_args = list(ref_path = ref_path), afun = result_afun ) - result <- build_table(lyt, dm) + result <- build_table(lyt, dm, round_type = "sas") expect_snapshot(result) # Compare with non-hierarchical layout. @@ -131,7 +131,7 @@ test_that("get_ref_info works with a vector analysis function", { extra_args = list(ref_path = ref_path), afun = standard_afun ) - std_result <- build_table(std_lyt, dm) + std_result <- build_table(std_lyt, dm, round_type = "sas") expect_snapshot(std_result) # Keep one explicit check to verify the relationship between the two outputs diff --git a/tests/testthat/test-jjcs_num_formats.R b/tests/testthat/test-jjcs_num_formats.R index d68273cd..80e14c49 100644 --- a/tests/testthat/test-jjcs_num_formats.R +++ b/tests/testthat/test-jjcs_num_formats.R @@ -29,90 +29,94 @@ test_that("jjcs_num_formats works", { # jjcsformat_xx_SAS/R format tests expect_snapshot({ - format_value(values, format = jjcsformat_xx_SAS("xx.x (xx.xx)")) - format_value(values, format = jjcsformat_xx_R("xx.x (xx.xx)")) - format_value(c(5.05, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.xx)")) - format_value(c(5.05, values[2]), format = jjcsformat_xx_R("xx.x (xx.xx)")) - format_value(c(5.15, values[2]), format = jjcsformat_xx_R("xx.x (xx.xx)")) + format_value(values, format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "iec") + format_value(c(5.05, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "sas") + format_value(c(5.05, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "iec") + format_value(c(5.15, 
values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "iec") format_value(c(5.15, values[2]), format = "xx.x (xx.x)") - format_value(c(4.15, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.xx)")) - format_value(c(4.15, values[2]), format = jjcsformat_xx_R("xx.x (xx.xx)")) + format_value(c(4.15, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "sas") + format_value(c(4.15, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "iec") format_value(c(4.15, values[2]), format = "xx.x (xx.x)") - format_value(c(4.15, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.x)")) + format_value(c(4.15, values[2]), format = jjcsformat_xx("xx.x (xx.x)"), round_type = "sas") format_value(c(3.15, values[2]), format = "xx.x (xx.x)") - format_value(c(3.15, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.x)")) - format_value(c(3.15, values[2]), format = jjcsformat_xx_R("xx.x (xx.x)")) + format_value(c(3.15, values[2]), format = jjcsformat_xx("xx.x (xx.x)"), round_type = "sas") + format_value(c(3.15, values[2]), format = jjcsformat_xx("xx.x (xx.x)"), round_type = "iec") }) # jjcsformat_xx_SAS format tests expect_snapshot({ - format_value(values, format = jjcsformat_xx_SAS("xx / xx")) - format_value(values, format = jjcsformat_xx_SAS("xx. 
/ xx.")) - format_value(values, format = jjcsformat_xx_SAS("xx.x / xx.x")) - format_value(values, format = jjcsformat_xx_SAS("xx.xx / xx.xx")) - format_value(values, format = jjcsformat_xx_SAS("xx.xxx / xx.xxx")) - format_value(values, format = jjcsformat_xx_SAS("(xx, xx)")) - format_value(values, format = jjcsformat_xx_SAS("(xx., xx.)")) - format_value(values, format = jjcsformat_xx_SAS("(xx.x, xx.x)")) - format_value(values, format = jjcsformat_xx_SAS("(xx.xx, xx.xx)")) - format_value(values, format = jjcsformat_xx_SAS("(xx.xxx, xx.xxx)")) - format_value(values, format = jjcsformat_xx_SAS("(xx.xxxx, xx.xxxx)")) - format_value(values, format = jjcsformat_xx_SAS("xx - xx")) - format_value(values, format = jjcsformat_xx_SAS("xx.x - xx.x")) - format_value(values, format = jjcsformat_xx_SAS("xx.xx - xx.xx")) - format_value(values, format = jjcsformat_xx_SAS("xx (xx)")) - format_value(values, format = jjcsformat_xx_SAS("xx (xx.)")) - format_value(values, format = jjcsformat_xx_SAS("xx (xx.x)")) - format_value(values, format = jjcsformat_xx_SAS("xx (xx.xx)")) - format_value(values, format = jjcsformat_xx_SAS("xx. (xx.)")) - format_value(values, format = jjcsformat_xx_SAS("xx.x (xx.x)")) - format_value(values, format = jjcsformat_xx_SAS("xx.xx (xx.xx)")) - format_value(values, format = jjcsformat_xx_SAS("xx.x, xx.x")) - format_value(values, format = jjcsformat_xx_SAS("xx.x to xx.x")) + format_value(values, format = jjcsformat_xx("xx / xx"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx. 
/ xx."), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.x / xx.x"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.xx / xx.xx"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.xxx / xx.xxx"), round_type = "sas") + format_value(values, format = jjcsformat_xx("(xx, xx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("(xx., xx.)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("(xx.x, xx.x)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("(xx.xx, xx.xx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("(xx.xxx, xx.xxx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("(xx.xxxx, xx.xxxx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx - xx"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.x - xx.x"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.xx - xx.xx"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx (xx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx (xx.)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx (xx.x)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx (xx.xx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx. (xx.)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.x (xx.x)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.xx (xx.xx)"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.x, xx.x"), round_type = "sas") + format_value(values, format = jjcsformat_xx("xx.x to xx.x"), round_type = "sas") format_value( c(values, 10.1235), - format = jjcsformat_xx_SAS("xx. (xx. - xx.)") + format = jjcsformat_xx("xx. (xx. 
- xx.)"), + round_type = "sas" ) format_value( c(values, 10.1235), - format = jjcsformat_xx_SAS("xx.x (xx.x - xx.x)") + format = jjcsformat_xx("xx.x (xx.x - xx.x)"), + round_type = "sas" ) format_value( c(values, 10.1235), - format = jjcsformat_xx_SAS("xx.xx (xx.xx - xx.xx)") + format = jjcsformat_xx("xx.xx (xx.xx - xx.xx)"), + round_type = "sas" ) format_value( c(values, 10.1235), - format = jjcsformat_xx_SAS("xx.xxx (xx.xxx - xx.xxx)") + format = jjcsformat_xx("xx.xxx (xx.xxx - xx.xxx)"), + round_type = "sas" ) - format_value(NULL, jjcsformat_xx_SAS("xx")) - format_value(c(500), jjcsformat_xx_SAS("N=xx")) - format_value(c(500), jjcsformat_xx_SAS("(N=xx)")) + format_value(NULL, jjcsformat_xx("xx"), round_type = "sas") + format_value(c(500), jjcsformat_xx("N=xx"), round_type = "sas") + format_value(c(500), jjcsformat_xx("(N=xx)"), round_type = "sas") }) ## errors expect_error( - format_value(5.1, jjcsformat_xx_SAS("abcd")), + format_value(5.1, jjcsformat_xx("abcd")), "input str must contain xx" ) expect_error( - format_value(5.1, jjcsformat_xx_SAS("xx - xx")), - "jjcs_format_xx must contain same number of xx as the number of stats" + format_value(5.1, jjcsformat_xx("xx + xx")), + "jjcsformat_xx must contain same number of xx as the number of stats" ) expect_error( - format_value(c(5.1, 2, 3), jjcsformat_xx_SAS("xx - xx")), - "jjcs_format_xx must contain same number of xx as the number of stats" + format_value(c(5.1, 2, 3), jjcsformat_xx("xx + xx")), + "jjcsformat_xx must contain same number of xx as the number of stats" ) ## trailing 0s are correct expect_snapshot({ - format_value(0, jjcsformat_xx_SAS("xx.")) - format_value(0, jjcsformat_xx_SAS("xx.x")) - format_value(0, jjcsformat_xx_SAS("xx.xx")) - format_value(0, jjcsformat_xx_SAS("xx.xxx")) - format_value(0, jjcsformat_xx_SAS("xx.xxxx")) + format_value(0, jjcsformat_xx("xx."), round_type = "sas") + format_value(0, jjcsformat_xx("xx.x"), round_type = "sas") + format_value(0, jjcsformat_xx("xx.xx"), round_type = 
"sas") + format_value(0, jjcsformat_xx("xx.xxx"), round_type = "sas") + format_value(0, jjcsformat_xx("xx.xxxx"), round_type = "sas") }) }) @@ -121,29 +125,32 @@ test_that("jjcsformats NA works", { ## handling NAs expect_snapshot({ - format_value(NA, jjcsformat_xx_SAS("xx."), na_str = "-") - format_value(NA, jjcsformat_xx_SAS("xx"), na_str = "-") + format_value(NA, jjcsformat_xx("xx."), round_type = "sas", na_str = "-") + format_value(NA, jjcsformat_xx("xx"), round_type = "sas", na_str = "-") }) expect_error( - format_value(c(1, NA), jjcsformat_xx_SAS("xx")), - "jjcs_format_xx must contain same number of xx as the number of stats" + format_value(c(1, NA), format = jjcsformat_xx("xx.x (xx.x - xx.x"), round_type = "sas"), + "jjcsformat_xx must contain same number of xx as the number of stats" ) expect_snapshot({ format_value( c(1.2, NA, NA), - jjcsformat_xx_SAS("xx.x (xx.x - xx.x)"), - na_str = "NA" + jjcsformat_xx("xx.x (xx.x - xx.x)"), + round_type = "sas", + na_str = "NE" ) format_value( c(1.2, NA, NA), - jjcsformat_xx_SAS("xx.x (xx.x - xx.x)"), + jjcsformat_xx("xx.x (xx.x - xx.x)"), + round_type = "sas", na_str = "x" ) format_value( c(NA, NA, NA), - jjcsformat_xx_SAS("xx.x (xx.x - xx.x)"), + jjcsformat_xx("xx.x (xx.x - xx.x)"), + round_type = "sas", na_str = "x" ) }) @@ -151,51 +158,59 @@ test_that("jjcsformats NA works", { expect_snapshot({ format_value( c(NA, NA), - format = jjcsformat_xx_SAS("xx.x - xx.x"), + format = jjcsformat_xx("xx.x - xx.x"), + round_type = "sas", na_str = c("hi", "lo") ) format_value( c(NA, 5.2), - format = jjcsformat_xx_SAS("xx.x - xx.x"), + format = jjcsformat_xx("xx.x - xx.x"), + round_type = "sas", na_str = "what" ) format_value( c(NA, 5.2), - format = jjcsformat_xx_SAS("xx.x - xx.x"), + format = jjcsformat_xx("xx.x - xx.x"), + round_type = "sas", na_str = c("hi", "lo") ) format_value( c(NA, NA), - format = jjcsformat_xx_SAS("xx.x - xx.x"), + format = jjcsformat_xx("xx.x - xx.x"), + round_type = "sas", na_str = "what" ) }) 
expect_snapshot({ - format_value(NA, format = jjcsformat_xx_SAS("xx.x"), na_str = character()) - format_value(NA, format = jjcsformat_xx_SAS("xx.x"), na_str = NA_character_) + format_value(NA, format = jjcsformat_xx("xx.x"), round_type = "sas", na_str = character()) + format_value(NA, format = jjcsformat_xx("xx.x"), round_type = "sas", na_str = NA_character_) }) # 3 d formats expect_snapshot({ format_value( c(6.23, NA, NA), - format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), + format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = "-" ) format_value( c(NA, NA, NA), - format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), + format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = "-" ) format_value( c(6.23, NA, NA), - format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), + format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = c("-", "x", "x") ) format_value( c(6.23, NA, NA), - format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), + format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = c("-", "x", "y") ) }) @@ -203,33 +218,37 @@ test_that("jjcsformats NA works", { test_that("jjcsformats count_fraction works", { expect_snapshot({ - format_value(cdf, format = jjcsformat_count_denom_fraction) - format_value(cf, format = jjcsformat_count_fraction) - format_value(cf, format = "xx (xx.x%)") + format_value(cdf, format = jjcsformat_count_denom_fraction, round_type = "sas") + format_value(cf, format = jjcsformat_count_fraction, round_type = "sas") + format_value(cf, format = "xx (xx.x%)", round_type = "iec") format_value( c(2000, 2001, 2000 / 2001), - format = jjcsformat_count_denom_fraction + format = jjcsformat_count_denom_fraction, + round_type = "sas" ) - format_value(c(2000, 2000 / 2001), format = "xx (xx.x%)") - format_value(c(1, 2001, 1 / 2001), format = jjcsformat_count_denom_fraction) - format_value(c(1, 1 / 2001), format = "xx (xx.x%)") - format_value(c(3, 3, 3 / 3), format = 
jjcsformat_count_denom_fraction) - format_value(c(3, 3 / 3), format = "xx (xx.x%)") + format_value(c(2000, 2000 / 2001), format = "xx (xx.x%)", round_type = "iec") + format_value(c(1, 2001, 1 / 2001), format = jjcsformat_count_denom_fraction, round_type = "sas") + format_value(c(1, 1 / 2001), format = "xx (xx.x%)", round_type = "iec") + format_value(c(3, 3, 3 / 3), format = jjcsformat_count_denom_fraction, round_type = "sas") + format_value(c(3, 3 / 3), format = "xx (xx.x%)", round_type = "iec") format_value( rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), - na_str = rep("NA", 10) + round_type = "sas", + na_str = rep("NE", 10) ) format_value( rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), + round_type = "sas", na_str = rep("NA", 1) ) - format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)")) - format_value(c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)")) + format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), round_type = "sas", na_str = "NA") + format_value(c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), round_type = "sas", na_str = rep("NE", 10)) format_value( c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), + round_type = "sas", na_str = c("ne1", "ne2", "ne3") ) }) diff --git a/tests/testthat/test-jjcsformats.R b/tests/testthat/test-jjcsformats.R index a4c77138..6cc6e210 100644 --- a/tests/testthat/test-jjcsformats.R +++ b/tests/testthat/test-jjcsformats.R @@ -108,27 +108,27 @@ test_that("jjjcs formats work", { ## round type works -test_that("roundmethod support works", { +test_that("round_type support works", { val <- 7.05 ## differs for xx.x between round types expect_equal( - format_value(val, format = jjcsformat_xx("xx.x")), + format_value(val, format = jjcsformat_xx("xx.x"), round_type = "sas"), format_value(val, format = "xx.x", round_type = "sas") ) # nolint start - expect_false(format_value(val, format = jjcsformat_xx("xx.x")) == + expect_false(format_value(val, 
format = jjcsformat_xx("xx.x"), round_type = "sas") == format_value(val, format = "xx.x", round_type = "iec")) # nolint end val2 <- c(5, 0.9945) expect_equal( format_value(val2, format = "xx (xx.x%)", round_type = "sas"), - jjcsformat_count_fraction(val2, roundmethod = "sas") + jjcsformat_count_fraction(val2, round_type = "sas") ) expect_equal( format_value(val2, format = "xx (xx.x%)", round_type = "iec"), - jjcsformat_count_fraction(val2, roundmethod = "iec") + jjcsformat_count_fraction(val2, round_type = "iec") ) # nolint start - expect_false(jjcsformat_count_fraction(val2, roundmethod = "sas") == + expect_false(jjcsformat_count_fraction(val2, round_type = "sas") == format_value(val2, "xx (xx.x%)", round_type = "iec")) # nolint end val3 <- c(5, 10, 0.9945) @@ -137,18 +137,18 @@ test_that("roundmethod support works", { add_spcs_fmt <- function(str) gsub("/", " / ", str, fixed = TRUE) expect_equal( format_value(val3, format = "xx / xx (xx.x%)", round_type = "sas"), - add_spcs_fmt(jjcsformat_count_denom_fraction(val3, roundmethod = "sas")) + add_spcs_fmt(jjcsformat_count_denom_fraction(val3, round_type = "sas")) ) expect_equal( format_value(val3, format = "xx / xx (xx.x%)", round_type = "iec"), - add_spcs_fmt(jjcsformat_count_denom_fraction(val3, roundmethod = "iec")) + add_spcs_fmt(jjcsformat_count_denom_fraction(val3, round_type = "iec")) ) # nolint start - expect_false(add_spcs_fmt(jjcsformat_count_denom_fraction(val3, roundmethod = "sas")) == + expect_false(add_spcs_fmt(jjcsformat_count_denom_fraction(val3, round_type = "sas")) == format_value(val3, "xx / xx (xx.x%)", round_type = "iec")) - expect_false(jjcsformat_fraction_count_denom(val3, roundmethod = "sas") == - jjcsformat_fraction_count_denom(val3, roundmethod = "iec")) + expect_false(jjcsformat_fraction_count_denom(val3, round_type = "sas") == + jjcsformat_fraction_count_denom(val3, round_type = "iec")) # nolint end }) @@ -166,6 +166,7 @@ test_that("jjcsformat_pval_fct works", { expect_snapshot({ 
jjcsformat_pval_fct(0.005)(0.0048) jjcsformat_pval_fct(0.005)(0.00499) + jjcsformat_pval_fct(0.005)(0.000499) jjcsformat_pval_fct(0)(0.0048) jjcsformat_pval_fct(0.05)(0.0048) jjcsformat_pval_fct(0.005)(0.0051) @@ -175,9 +176,29 @@ test_that("jjcsformat_pval_fct works", { jjcsformat_pval_fct(0)(0.9999) jjcsformat_pval_fct(0)(0.999) jjcsformat_pval_fct(0)(0.9990000001) + jjcsformat_pval_fct(0)(NA_real_, na_str = "ne") + jjcsformat_pval_fct(0.0005)(NA_real_, na_str = "ne") }) }) +test_that("some special cases for jjcsformat_pval_fct",{ + expect_identical( + format_value(NA_real_, format = jjcsformat_pval_fct(0), na_str = "NE"), + "NE") + expect_identical( + format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), + "NE") + expect_error( + format_value(0.00000123, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), + "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." + ) + expect_error( + format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE")? + "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." 
+ ) + +}) + test_that("jjcsformat_xx works also for empty cells", { expect_silent(in_rows( .list = list( diff --git a/tests/testthat/test-kaplan_meier.R b/tests/testthat/test-kaplan_meier.R index f9336606..6d77bfff 100644 --- a/tests/testthat/test-kaplan_meier.R +++ b/tests/testthat/test-kaplan_meier.R @@ -108,7 +108,7 @@ test_that("a_kaplan_meier works inside analyze in table", { is_event = "is_event" ) ) %>% - build_table(df = adtte_f) + build_table(df = adtte_f, round_type = "sas") res <- expect_silent(result) expect_snapshot(res) @@ -145,7 +145,7 @@ test_that("a_kaplan_meier works inside analyze in table with custom arguments", .indent_mods = c(median_ci_3d = 3L) ) ) %>% - build_table(df = adtte_f) + build_table(df = adtte_f, round_type = "sas") res <- expect_silent(result) expect_snapshot(res) diff --git a/tests/testthat/test-patyrs-eair100.R b/tests/testthat/test-patyrs-eair100.R index 70632323..258556fc 100644 --- a/tests/testthat/test-patyrs-eair100.R +++ b/tests/testthat/test-patyrs-eair100.R @@ -72,7 +72,7 @@ test_that("Check patient years numbers are giving expected result", { afun = a_patyrs_j, extra_args = extra_args ) - tbl1 <- build_table(lyt1, adae, adsl) + tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") res1 <- cell_values(tbl1[c("TRTDURY", "patyrs"), "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) @@ -105,7 +105,7 @@ test_that("Check aeir100 numbers are giving expected result", { ref_path = ref_path ) ) - tbl1 <- build_table(lyt1, adae, adsl) + tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) @@ -158,7 +158,7 @@ test_that("Check aeir100 numbers are giving expected result when fup_var argumen ref_path = ref_path ) ) - tbl1 <- build_table(lyt1, adae, adsl) + tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) result <- 
as.numeric(unlist(unname(res1))[[1]]) @@ -211,7 +211,7 @@ test_that("Check aeir100 numbers are giving expected result when occ_dy argument ref_path = ref_path ) ) - tbl1 <- build_table(lyt1, adae, adsl) + tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) diff --git a/tests/testthat/test-proportions.R b/tests/testthat/test-proportions.R index eaff4e39..f2f38601 100644 --- a/tests/testthat/test-proportions.R +++ b/tests/testthat/test-proportions.R @@ -105,7 +105,7 @@ test_that("a_proportion_ci_factor works as expected", { test_that("prop_split_fun works as expected", { result <- basic_table() %>% split_cols_by("ID", split_fun = prop_split_fun) |> - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") expect_snapshot(result) }) diff --git a/tests/testthat/test-pruning_functions.R b/tests/testthat/test-pruning_functions.R index 4a4ad061..91695af1 100644 --- a/tests/testthat/test-pruning_functions.R +++ b/tests/testthat/test-pruning_functions.R @@ -8,7 +8,7 @@ tab <- basic_table() %>% split_rows_by("STRATA1") %>% summarize_row_groups() %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") trtvar <- "ARM" ctrl_grp <- "B: Placebo" @@ -130,7 +130,7 @@ testthat::test_that("test keep_non_null_rows", { split_rows_by("ARM") %>% analyze("ARM", afun = xnull_cell_fn, show_labels = "hidden") %>% analyze("STRATA1", show_labels = "hidden") %>% - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") result <- prune_table(tabsx, keep_rows(keep_non_null_rows)) @@ -138,7 +138,7 @@ testthat::test_that("test keep_non_null_rows", { split_cols_by("ARM") %>% split_rows_by("ARM") %>% analyze("STRATA1") %>% - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") expected <- tabsx2 @@ -177,7 +177,7 @@ 
testthat::test_that("bspt_pruner with fraction", { tab_bspt_pruner <- basic_table() %>% split_cols_by("ARM") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") result <- prune_table( tab_bspt_pruner, @@ -197,7 +197,7 @@ testthat::test_that("bspt_pruner with fraction and diff_from_control", { tab_bspt_pruner <- basic_table() %>% split_cols_by("ARM") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") result <- prune_table( tab_bspt_pruner, @@ -219,7 +219,7 @@ testthat::test_that("bspt_pruner with fraction and diff_from_control and keeprow tab_bspt_pruner <- basic_table() %>% split_cols_by("ARM") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") result1 <- prune_table( tab_bspt_pruner, @@ -280,7 +280,7 @@ testthat::test_that("count_pruner in small groups", { split_fun = remove_split_levels("B: Placebo") ) %>% analyze("COUNTRY", afun = a_freq_j, extra_args = extra_args) %>% - build_table(DM_sub) + build_table(DM_sub, round_type = "sas") result <- prune_table( tab_bspt_pruner, @@ -402,7 +402,7 @@ testthat::test_that("bspt_pruner in AE like tables", { show_labels = "hidden", extra_args = extra_args_rr ) %>% - build_table(my_adae, my_adsl) + build_table(my_adae, my_adsl, round_type = "sas") result1 <- safe_prune_table( tbl1, @@ -461,7 +461,7 @@ testthat::test_that("bspt_pruner with less obvious control specifications", { split_cols_by("ARM") %>% split_cols_by("SEX") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(DM_sub) + build_table(DM_sub, round_type = "sas") rps_label <- make_row_df(tab_bspt_pruner)$label @@ -516,7 +516,7 @@ my_DM <- formatters::DM %>% my_tab <- basic_table() %>% split_cols_by("ARM") %>% analyze("AGE") %>% - build_table(my_DM) + build_table(my_DM, round_type = "sas") 
testthat::test_that("check that if all data is pruned leaving no rows, the outcome is the message", { # create an empty table tree so we can see that safe_prune_table returns the message the user specified diff --git a/tests/testthat/test-relative_risk.R b/tests/testthat/test-relative_risk.R index 86aa27bb..35f56ad0 100644 --- a/tests/testthat/test-relative_risk.R +++ b/tests/testthat/test-relative_risk.R @@ -354,7 +354,7 @@ test_that("a_relative_risk in table layout gives same results as with SAS", { ) ) - result <- build_table(lyt, dat) + result <- build_table(lyt, dat, round_type = "sas") first_row <- as.list(result[ c("Response", "rel_risk_ci"), c("Treatment", "A") diff --git a/tests/testthat/test-remove_col_count.R b/tests/testthat/test-remove_col_count.R index a6fd6c0d..b11bb5b5 100644 --- a/tests/testthat/test-remove_col_count.R +++ b/tests/testthat/test-remove_col_count.R @@ -19,7 +19,7 @@ testthat::test_that("remove_col_count works", { split_cols_by("set2", nested = FALSE) %>% split_cols_by("ARM", split_fun = remove_split_levels("B: Placebo")) - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") tbl2 <- remove_col_count(tbl, span_label_var = "set2") diff --git a/tests/testthat/test-resp01_functions.R b/tests/testthat/test-resp01_functions.R index fbf52144..a05a8460 100644 --- a/tests/testthat/test-resp01_functions.R +++ b/tests/testthat/test-resp01_functions.R @@ -8,7 +8,7 @@ test_that("resp01_split_fun_fct 1 works as expected", { result <- basic_table() %>% split_cols_by("ARM", split_fun = add_overall_level("Overall")) %>% split_cols_by("ID", split_fun = split_fun) |> - build_table(formatters::DM) + build_table(formatters::DM, round_type = "sas") expect_snapshot(result) }) @@ -20,7 +20,7 @@ test_that("resp01_split_fun_fct 2 works as expected", { result <- basic_table() %>% split_cols_by("ARM", split_fun = add_overall_level("Overall")) %>% split_cols_by("ID", split_fun = split_fun) |> - build_table(formatters::DM) + 
build_table(formatters::DM, round_type = "sas") expect_snapshot(col_info(result)) }) diff --git a/tests/testthat/test-response_by_var.R b/tests/testthat/test-response_by_var.R index d3c618e8..ff418c24 100644 --- a/tests/testthat/test-response_by_var.R +++ b/tests/testthat/test-response_by_var.R @@ -28,7 +28,7 @@ test_that("response_by_var various scenarios", { ) ## Scenario 1: TRTEMFL has no missing values and values Y/N - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") res1 <- cell_values(tbl[c("SEX", "F"), "A: Drug X"]) res1 <- unlist(unname(res1)) @@ -47,7 +47,7 @@ test_that("response_by_var various scenarios", { adsl2$TRTEMFL <- ifelse(adsl2$TRTEMFL == "Y", "Y", NA) adsl2$TRTEMFL <- factor(adsl2$TRTEMFL, levels = "Y") - tbl2 <- build_table(lyt, adsl2) + tbl2 <- build_table(lyt, adsl2, round_type = "sas") expect_snapshot(tbl2) ## Scenario 3: TRTEMFL has missing values and Y only, and analysis variable has missing values @@ -56,7 +56,7 @@ test_that("response_by_var various scenarios", { adsl3$TRTEMFL <- factor(adsl3$TRTEMFL, levels = "Y") adsl3$SEX[1:10] <- NA_character_ - tbl3 <- build_table(lyt, adsl3) + tbl3 <- build_table(lyt, adsl3, round_type = "sas") res3 <- cell_values(tbl3[c("SEX", "F"), "A: Drug X"]) res3 <- unlist(unname(res3)) @@ -75,7 +75,7 @@ test_that("response_by_var various scenarios", { adsl4$SEX[1:10] <- NA_character_ adsl4$TRTEMFL[8:15] <- NA_character_ - tbl4 <- build_table(lyt, adsl4) + tbl4 <- build_table(lyt, adsl4, round_type = "sas") res4 <- cell_values(tbl4[c("SEX", "F"), "A: Drug X"]) res4 <- unlist(unname(res4)) @@ -96,7 +96,7 @@ test_that("response_by_var various scenarios", { levels = c(levels(adsl5$SEX), "extra level") ) - tbl5 <- build_table(lyt, adsl5) + tbl5 <- build_table(lyt, adsl5, round_type = "sas") res5 <- cell_values(tbl5[c("SEX", "extra level"), "A: Drug X"]) res5 <- unlist(unname(res5)) diff --git a/tests/testthat/test-sorting_functions.R b/tests/testthat/test-sorting_functions.R index 
b1f1125e..1ed50161 100644 --- a/tests/testthat/test-sorting_functions.R +++ b/tests/testthat/test-sorting_functions.R @@ -16,7 +16,7 @@ tab <- basic_table() %>% split_rows_by("STRATA1") %>% summarize_row_groups() %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(DM2) + build_table(DM2, round_type = "sas") #### Tests for jj_complex_scorefun function #### testthat::test_that("jj_complex_scorefun is identical to standard sorting: spanningheadercolvar=NA", { @@ -138,7 +138,7 @@ tab2 <- basic_table() %>% split_rows_by("STRATA1") %>% summarize_row_groups() %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(DM2) + build_table(DM2, round_type = "sas") testthat::test_that("jj_complex_scorefun uses first column to sort: usefirstcol", { result <- sort_at_path( diff --git a/tests/testthat/test-split_functions.R b/tests/testthat/test-split_functions.R index 52e9ee71..a36126df 100644 --- a/tests/testthat/test-split_functions.R +++ b/tests/testthat/test-split_functions.R @@ -30,7 +30,7 @@ testthat::test_that("cond_rm_facets works", { split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM", split_fun = mysplit) - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") cols <- make_col_df(tbl, visible_only = TRUE)$name expected <- c("A: Drug X", "C: Combination", "Combined", "B: Placebo") @@ -59,7 +59,7 @@ testthat::test_that("rm_levels works", { split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") expected <- setdiff(levels(adsl$COUNTRY), c("JPN", "USA", "NGA")) @@ -80,7 +80,7 @@ testthat::test_that("real_add_overall_facet works", { split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") expected <- c(levels(adsl$COUNTRY), "Overall") 
@@ -104,7 +104,7 @@ testthat::test_that("make_combo_splitfun works", { split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") expected <- "Some Combined Countries" @@ -141,7 +141,7 @@ testthat::test_that("combine_nonblank works", { split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") expected <- c(levels(adsl$COUNTRY), "Overall") @@ -175,13 +175,13 @@ testthat::test_that("rm_blank_levels works", { lyt <- basic_table() %>% split_rows_by("COUNTRY") %>% summarize_row_groups() - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") row_names_before <- rtables::row.names(tbl) lyt <- basic_table() %>% split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() - tbl <- build_table(lyt, adsl) + tbl <- build_table(lyt, adsl, round_type = "sas") row_names_after <- rtables::row.names(tbl) diff --git a/tests/testthat/test-summarize_ancova.R b/tests/testthat/test-summarize_ancova.R index cbfdc6dc..2a711b9e 100644 --- a/tests/testthat/test-summarize_ancova.R +++ b/tests/testthat/test-summarize_ancova.R @@ -188,6 +188,6 @@ test_that("a_summarize_ancova_j works as expected in table layout", { ) ) ) %>% - build_table(iris) + build_table(iris, round_type = "sas") expect_snapshot(result) }) From 33230bef9ed1d2a79557e93a6a2c2605672359b7 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 12:59:45 +0000 Subject: [PATCH 03/49] update test snapshots --- man/count_denom_fraction.Rd | 34 ---- tests/testthat/_snaps/coxreg_multivar.md | 8 +- tests/testthat/_snaps/jjcs_num_formats.md | 189 ++++++++++++---------- tests/testthat/_snaps/jjcsformats.md | 12 ++ 4 files changed, 120 insertions(+), 123 deletions(-) delete mode 100644 man/count_denom_fraction.Rd diff --git a/man/count_denom_fraction.Rd 
b/man/count_denom_fraction.Rd deleted file mode 100644 index c4394543..00000000 --- a/man/count_denom_fraction.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jjcsformats.R -\name{jjcsformat_count_denom_fraction} -\alias{jjcsformat_count_denom_fraction} -\title{Formatting count, denominator and fraction values.} -\usage{ -jjcsformat_count_denom_fraction(x, d = 1, roundmethod = c("sas", "iec"), ...) -} -\arguments{ -\item{x}{(\verb{numeric vector})\cr Vector with elements \code{num} and \code{fraction} or \code{num}, \code{denom} and \code{fraction}.} - -\item{d}{(\code{numeric(1)})\cr Number of digits to round fraction to (default = 1)} - -\item{roundmethod}{(\code{string})\cr choice of rounding methods. Options are: -\itemize{ -\item \code{sas}: the underlying rounding method is \code{tidytlg::roundSAS}, where \cr -roundSAS comes from this Stack Overflow post https://stackoverflow.com/questions/12688717/round-up-from-5 -\item \code{iec}: the underlying rounding method is \code{round} -}} - -\item{...}{Additional arguments passed to other methods.} -} -\value{ -\code{x}, formatted into a string with the appropriate -format and \code{d} digits of precision. -} -\description{ -Formatting count, denominator and fraction values. -} -\examples{ -jjcsformat_count_denom_fraction(c(7, 10, 0.7)) -jjcsformat_count_denom_fraction(c(70000, 70001, 70000 / 70001)) -jjcsformat_count_denom_fraction(c(235, 235, 235 / 235)) -} diff --git a/tests/testthat/_snaps/coxreg_multivar.md b/tests/testthat/_snaps/coxreg_multivar.md index 0fbca21e..02aaec89 100644 --- a/tests/testthat/_snaps/coxreg_multivar.md +++ b/tests/testthat/_snaps/coxreg_multivar.md @@ -80,10 +80,10 @@ Coeff. (SE) p-value Estimate 50% CI —————————————————————————————————————————————————————————————————————————————————————————— Model Parameter - Treatment (B: Placebo vs. A: Drug X) 0 (0) <0.001 1.5170 (1.4, 1.6) - Treatment (C: Combination vs. 
A: Drug X) 1 (0) <0.001 1.9203 (1.8, 2.1) - Sex (M vs. F) 0 (0) 0.099 0.8813 (0.8, 0.9) - Age 0 (0) 0.646 1.0024 (1.0, 1.0) + Treatment (B: Placebo vs. A: Drug X) 0.4 (0.1) <0.001 1.5170 (1.4, 1.6) + Treatment (C: Combination vs. A: Drug X) 0.7 (0.1) <0.001 1.9203 (1.8, 2.1) + Sex (M vs. F) -0.1 (0.1) 0.099 0.8813 (0.8, 0.9) + Age 0.0 (0.0) 0.646 1.0024 (1.0, 1.0) # summarize_coxreg_multivar works with row splits diff --git a/tests/testthat/_snaps/jjcs_num_formats.md b/tests/testthat/_snaps/jjcs_num_formats.md index 15df9649..bbe44134 100644 --- a/tests/testthat/_snaps/jjcs_num_formats.md +++ b/tests/testthat/_snaps/jjcs_num_formats.md @@ -24,23 +24,26 @@ --- Code - format_value(values, format = jjcsformat_xx_SAS("xx.x (xx.xx)")) + format_value(values, format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "sas") Output [1] "5.1 (7.89)" Code - format_value(values, format = jjcsformat_xx_R("xx.x (xx.xx)")) + format_value(values, format = jjcsformat_xx("xx.x (xx.xx)"), round_type = "iec") Output [1] "5.1 (7.89)" Code - format_value(c(5.05, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.xx)")) + format_value(c(5.05, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), + round_type = "sas") Output [1] "5.1 (7.89)" Code - format_value(c(5.05, values[2]), format = jjcsformat_xx_R("xx.x (xx.xx)")) + format_value(c(5.05, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), + round_type = "iec") Output [1] "5.0 (7.89)" Code - format_value(c(5.15, values[2]), format = jjcsformat_xx_R("xx.x (xx.xx)")) + format_value(c(5.15, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), + round_type = "iec") Output [1] "5.2 (7.89)" Code @@ -48,11 +51,13 @@ Output [1] "5.2 (7.9)" Code - format_value(c(4.15, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.xx)")) + format_value(c(4.15, values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), + round_type = "sas") Output [1] "4.2 (7.89)" Code - format_value(c(4.15, values[2]), format = jjcsformat_xx_R("xx.x (xx.xx)")) + format_value(c(4.15, 
values[2]), format = jjcsformat_xx("xx.x (xx.xx)"), + round_type = "iec") Output [1] "4.2 (7.89)" Code @@ -60,7 +65,8 @@ Output [1] "4.2 (7.9)" Code - format_value(c(4.15, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.x)")) + format_value(c(4.15, values[2]), format = jjcsformat_xx("xx.x (xx.x)"), + round_type = "sas") Output [1] "4.2 (7.9)" Code @@ -68,303 +74,316 @@ Output [1] "3.1 (7.9)" Code - format_value(c(3.15, values[2]), format = jjcsformat_xx_SAS("xx.x (xx.x)")) + format_value(c(3.15, values[2]), format = jjcsformat_xx("xx.x (xx.x)"), + round_type = "sas") Output [1] "3.2 (7.9)" Code - format_value(c(3.15, values[2]), format = jjcsformat_xx_R("xx.x (xx.x)")) + format_value(c(3.15, values[2]), format = jjcsformat_xx("xx.x (xx.x)"), + round_type = "iec") Output [1] "3.1 (7.9)" --- Code - format_value(values, format = jjcsformat_xx_SAS("xx / xx")) + format_value(values, format = jjcsformat_xx("xx / xx"), round_type = "sas") Output [1] "5.123456 / 7.891112" Code - format_value(values, format = jjcsformat_xx_SAS("xx. / xx.")) + format_value(values, format = jjcsformat_xx("xx. 
/ xx."), round_type = "sas") Output [1] "5 / 8" Code - format_value(values, format = jjcsformat_xx_SAS("xx.x / xx.x")) + format_value(values, format = jjcsformat_xx("xx.x / xx.x"), round_type = "sas") Output [1] "5.1 / 7.9" Code - format_value(values, format = jjcsformat_xx_SAS("xx.xx / xx.xx")) + format_value(values, format = jjcsformat_xx("xx.xx / xx.xx"), round_type = "sas") Output [1] "5.12 / 7.89" Code - format_value(values, format = jjcsformat_xx_SAS("xx.xxx / xx.xxx")) + format_value(values, format = jjcsformat_xx("xx.xxx / xx.xxx"), round_type = "sas") Output [1] "5.123 / 7.891" Code - format_value(values, format = jjcsformat_xx_SAS("(xx, xx)")) + format_value(values, format = jjcsformat_xx("(xx, xx)"), round_type = "sas") Output [1] "(5.123456, 7.891112)" Code - format_value(values, format = jjcsformat_xx_SAS("(xx., xx.)")) + format_value(values, format = jjcsformat_xx("(xx., xx.)"), round_type = "sas") Output [1] "(5, 8)" Code - format_value(values, format = jjcsformat_xx_SAS("(xx.x, xx.x)")) + format_value(values, format = jjcsformat_xx("(xx.x, xx.x)"), round_type = "sas") Output [1] "(5.1, 7.9)" Code - format_value(values, format = jjcsformat_xx_SAS("(xx.xx, xx.xx)")) + format_value(values, format = jjcsformat_xx("(xx.xx, xx.xx)"), round_type = "sas") Output [1] "(5.12, 7.89)" Code - format_value(values, format = jjcsformat_xx_SAS("(xx.xxx, xx.xxx)")) + format_value(values, format = jjcsformat_xx("(xx.xxx, xx.xxx)"), round_type = "sas") Output [1] "(5.123, 7.891)" Code - format_value(values, format = jjcsformat_xx_SAS("(xx.xxxx, xx.xxxx)")) + format_value(values, format = jjcsformat_xx("(xx.xxxx, xx.xxxx)"), round_type = "sas") Output [1] "(5.1235, 7.8911)" Code - format_value(values, format = jjcsformat_xx_SAS("xx - xx")) + format_value(values, format = jjcsformat_xx("xx - xx"), round_type = "sas") Output [1] "5.123456 - 7.891112" Code - format_value(values, format = jjcsformat_xx_SAS("xx.x - xx.x")) + format_value(values, format = jjcsformat_xx("xx.x 
- xx.x"), round_type = "sas") Output [1] "5.1 - 7.9" Code - format_value(values, format = jjcsformat_xx_SAS("xx.xx - xx.xx")) + format_value(values, format = jjcsformat_xx("xx.xx - xx.xx"), round_type = "sas") Output [1] "5.12 - 7.89" Code - format_value(values, format = jjcsformat_xx_SAS("xx (xx)")) + format_value(values, format = jjcsformat_xx("xx (xx)"), round_type = "sas") Output [1] "5.123456 (7.891112)" Code - format_value(values, format = jjcsformat_xx_SAS("xx (xx.)")) + format_value(values, format = jjcsformat_xx("xx (xx.)"), round_type = "sas") Output [1] "5.123456 (8)" Code - format_value(values, format = jjcsformat_xx_SAS("xx (xx.x)")) + format_value(values, format = jjcsformat_xx("xx (xx.x)"), round_type = "sas") Output [1] "5.123456 (7.9)" Code - format_value(values, format = jjcsformat_xx_SAS("xx (xx.xx)")) + format_value(values, format = jjcsformat_xx("xx (xx.xx)"), round_type = "sas") Output [1] "5.123456 (7.89)" Code - format_value(values, format = jjcsformat_xx_SAS("xx. (xx.)")) + format_value(values, format = jjcsformat_xx("xx. (xx.)"), round_type = "sas") Output [1] "5 (8)" Code - format_value(values, format = jjcsformat_xx_SAS("xx.x (xx.x)")) + format_value(values, format = jjcsformat_xx("xx.x (xx.x)"), round_type = "sas") Output [1] "5.1 (7.9)" Code - format_value(values, format = jjcsformat_xx_SAS("xx.xx (xx.xx)")) + format_value(values, format = jjcsformat_xx("xx.xx (xx.xx)"), round_type = "sas") Output [1] "5.12 (7.89)" Code - format_value(values, format = jjcsformat_xx_SAS("xx.x, xx.x")) + format_value(values, format = jjcsformat_xx("xx.x, xx.x"), round_type = "sas") Output [1] "5.1, 7.9" Code - format_value(values, format = jjcsformat_xx_SAS("xx.x to xx.x")) + format_value(values, format = jjcsformat_xx("xx.x to xx.x"), round_type = "sas") Output [1] "5.1 to 7.9" Code - format_value(c(values, 10.1235), format = jjcsformat_xx_SAS("xx. (xx. - xx.)")) + format_value(c(values, 10.1235), format = jjcsformat_xx("xx. (xx. 
- xx.)"), + round_type = "sas") Output [1] "5 (8 - 10)" Code - format_value(c(values, 10.1235), format = jjcsformat_xx_SAS( - "xx.x (xx.x - xx.x)")) + format_value(c(values, 10.1235), format = jjcsformat_xx("xx.x (xx.x - xx.x)"), + round_type = "sas") Output [1] "5.1 (7.9 - 10.1)" Code - format_value(c(values, 10.1235), format = jjcsformat_xx_SAS( - "xx.xx (xx.xx - xx.xx)")) + format_value(c(values, 10.1235), format = jjcsformat_xx("xx.xx (xx.xx - xx.xx)"), + round_type = "sas") Output [1] "5.12 (7.89 - 10.12)" Code - format_value(c(values, 10.1235), format = jjcsformat_xx_SAS( - "xx.xxx (xx.xxx - xx.xxx)")) + format_value(c(values, 10.1235), format = jjcsformat_xx( + "xx.xxx (xx.xxx - xx.xxx)"), round_type = "sas") Output [1] "5.123 (7.891 - 10.124)" Code - format_value(NULL, jjcsformat_xx_SAS("xx")) + format_value(NULL, jjcsformat_xx("xx"), round_type = "sas") Output [1] "" Code - format_value(c(500), jjcsformat_xx_SAS("N=xx")) + format_value(c(500), jjcsformat_xx("N=xx"), round_type = "sas") Output [1] "N=500" Code - format_value(c(500), jjcsformat_xx_SAS("(N=xx)")) + format_value(c(500), jjcsformat_xx("(N=xx)"), round_type = "sas") Output [1] "(N=500)" --- Code - format_value(0, jjcsformat_xx_SAS("xx.")) + format_value(0, jjcsformat_xx("xx."), round_type = "sas") Output [1] "0" Code - format_value(0, jjcsformat_xx_SAS("xx.x")) + format_value(0, jjcsformat_xx("xx.x"), round_type = "sas") Output [1] "0.0" Code - format_value(0, jjcsformat_xx_SAS("xx.xx")) + format_value(0, jjcsformat_xx("xx.xx"), round_type = "sas") Output [1] "0.00" Code - format_value(0, jjcsformat_xx_SAS("xx.xxx")) + format_value(0, jjcsformat_xx("xx.xxx"), round_type = "sas") Output [1] "0.000" Code - format_value(0, jjcsformat_xx_SAS("xx.xxxx")) + format_value(0, jjcsformat_xx("xx.xxxx"), round_type = "sas") Output [1] "0.0000" # jjcsformats NA works Code - format_value(NA, jjcsformat_xx_SAS("xx."), na_str = "-") + format_value(NA, jjcsformat_xx("xx."), round_type = "sas", na_str = "-") 
Output [1] "-" Code - format_value(NA, jjcsformat_xx_SAS("xx"), na_str = "-") + format_value(NA, jjcsformat_xx("xx"), round_type = "sas", na_str = "-") Output [1] "-" --- Code - format_value(c(1.2, NA, NA), jjcsformat_xx_SAS("xx.x (xx.x - xx.x)"), na_str = "NA") + format_value(c(1.2, NA, NA), jjcsformat_xx("xx.x (xx.x - xx.x)"), round_type = "sas", + na_str = "NE") Output [1] "1.2 (NE - NE)" Code - format_value(c(1.2, NA, NA), jjcsformat_xx_SAS("xx.x (xx.x - xx.x)"), na_str = "x") + format_value(c(1.2, NA, NA), jjcsformat_xx("xx.x (xx.x - xx.x)"), round_type = "sas", + na_str = "x") Output [1] "1.2 (x - x)" Code - format_value(c(NA, NA, NA), jjcsformat_xx_SAS("xx.x (xx.x - xx.x)"), na_str = "x") + format_value(c(NA, NA, NA), jjcsformat_xx("xx.x (xx.x - xx.x)"), round_type = "sas", + na_str = "x") Output [1] "x" --- Code - format_value(c(NA, NA), format = jjcsformat_xx_SAS("xx.x - xx.x"), na_str = c( - "hi", "lo")) + format_value(c(NA, NA), format = jjcsformat_xx("xx.x - xx.x"), round_type = "sas", + na_str = c("hi", "lo")) Output [1] "hi - lo" Code - format_value(c(NA, 5.2), format = jjcsformat_xx_SAS("xx.x - xx.x"), na_str = "what") + format_value(c(NA, 5.2), format = jjcsformat_xx("xx.x - xx.x"), round_type = "sas", + na_str = "what") Output [1] "what - 5.2" Code - format_value(c(NA, 5.2), format = jjcsformat_xx_SAS("xx.x - xx.x"), na_str = c( - "hi", "lo")) + format_value(c(NA, 5.2), format = jjcsformat_xx("xx.x - xx.x"), round_type = "sas", + na_str = c("hi", "lo")) Output [1] "hi - 5.2" Code - format_value(c(NA, NA), format = jjcsformat_xx_SAS("xx.x - xx.x"), na_str = "what") + format_value(c(NA, NA), format = jjcsformat_xx("xx.x - xx.x"), round_type = "sas", + na_str = "what") Output [1] "what" --- Code - format_value(NA, format = jjcsformat_xx_SAS("xx.x"), na_str = character()) + format_value(NA, format = jjcsformat_xx("xx.x"), round_type = "sas", na_str = character()) Output [1] "NA" Code - format_value(NA, format = jjcsformat_xx_SAS("xx.x"), na_str = 
NA_character_) + format_value(NA, format = jjcsformat_xx("xx.x"), round_type = "sas", na_str = NA_character_) Output [1] "NA" --- Code - format_value(c(6.23, NA, NA), format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), - na_str = "-") + format_value(c(6.23, NA, NA), format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = "-") Output [1] "6.2 (-, -)" Code - format_value(c(NA, NA, NA), format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), - na_str = "-") + format_value(c(NA, NA, NA), format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = "-") Output [1] "-" Code - format_value(c(6.23, NA, NA), format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), - na_str = c("-", "x", "x")) + format_value(c(6.23, NA, NA), format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = c("-", "x", "x")) Output [1] "6.2 (-, x)" Code - format_value(c(6.23, NA, NA), format = jjcsformat_xx_SAS("xx.x (xx.xx, xx.xx)"), - na_str = c("-", "x", "y")) + format_value(c(6.23, NA, NA), format = jjcsformat_xx("xx.x (xx.xx, xx.xx)"), + round_type = "sas", na_str = c("-", "x", "y")) Output [1] "6.2 (-, x)" # jjcsformats count_fraction works Code - format_value(cdf, format = jjcsformat_count_denom_fraction) + format_value(cdf, format = jjcsformat_count_denom_fraction, round_type = "sas") Output [1] "5/2000 (0.3%)" Code - format_value(cf, format = jjcsformat_count_fraction) + format_value(cf, format = jjcsformat_count_fraction, round_type = "sas") Output [1] "5 (0.3%)" Code - format_value(cf, format = "xx (xx.x%)") + format_value(cf, format = "xx (xx.x%)", round_type = "iec") Output [1] "5 (0.2%)" Code - format_value(c(2000, 2001, 2000 / 2001), format = jjcsformat_count_denom_fraction) + format_value(c(2000, 2001, 2000 / 2001), format = jjcsformat_count_denom_fraction, + round_type = "sas") Output [1] "2000/2001 (>99.9%)" Code - format_value(c(2000, 2000 / 2001), format = "xx (xx.x%)") + format_value(c(2000, 2000 / 2001), format = "xx (xx.x%)", 
round_type = "iec") Output [1] "2000 (100.0%)" Code - format_value(c(1, 2001, 1 / 2001), format = jjcsformat_count_denom_fraction) + format_value(c(1, 2001, 1 / 2001), format = jjcsformat_count_denom_fraction, + round_type = "sas") Output [1] "1/2001 (<0.1%)" Code - format_value(c(1, 1 / 2001), format = "xx (xx.x%)") + format_value(c(1, 1 / 2001), format = "xx (xx.x%)", round_type = "iec") Output [1] "1 (0.0%)" Code - format_value(c(3, 3, 3 / 3), format = jjcsformat_count_denom_fraction) + format_value(c(3, 3, 3 / 3), format = jjcsformat_count_denom_fraction, + round_type = "sas") Output [1] "3/3 (100.0%)" Code - format_value(c(3, 3 / 3), format = "xx (xx.x%)") + format_value(c(3, 3 / 3), format = "xx (xx.x%)", round_type = "iec") Output [1] "3 (100.0%)" Code - format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), na_str = rep( - "NA", 10)) + format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), + round_type = "sas", na_str = rep("NE", 10)) Output [1] "NE (NE, NE)" Code - format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), na_str = rep( - "NA", 1)) + format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), + round_type = "sas", na_str = rep("NA", 1)) Output [1] "NA" Code - format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)")) + format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), + round_type = "sas", na_str = "NA") Output [1] "NA" Code - format_value(c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)")) + format_value(c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), + round_type = "sas", na_str = rep("NE", 10)) Output [1] "1.0 (NE, NE)" Code format_value(c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), - na_str = c("ne1", "ne2", "ne3")) + round_type = "sas", na_str = c("ne1", "ne2", "ne3")) Output [1] "1.0 (ne1, ne2)" diff --git a/tests/testthat/_snaps/jjcsformats.md b/tests/testthat/_snaps/jjcsformats.md index b7ee6ced..6a4acf60 
100644 --- a/tests/testthat/_snaps/jjcsformats.md +++ b/tests/testthat/_snaps/jjcsformats.md @@ -210,6 +210,10 @@ jjcsformat_pval_fct(0.005)(0.00499) Output [1] "0.00499" + Code + jjcsformat_pval_fct(0.005)(0.000499) + Output + [1] "<0.001" Code jjcsformat_pval_fct(0)(0.0048) Output @@ -246,4 +250,12 @@ jjcsformat_pval_fct(0)(0.9990000001) Output [1] ">0.999" + Code + jjcsformat_pval_fct(0)(NA_real_, na_str = "ne") + Output + [1] "ne" + Code + jjcsformat_pval_fct(5e-04)(NA_real_, na_str = "ne") + Output + [1] "ne" From 22cfc2f67104cfc0870de3846101907383cfb8fd Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 13:00:31 +0000 Subject: [PATCH 04/49] devtools::document() --- NAMESPACE | 2 +- man/count_fraction.Rd | 59 ++++++++++++++-------- man/format_xx_fct.Rd | 48 ------------------ man/fraction_count_denom.Rd | 41 ---------------- man/jjcsformat_pval_fct.Rd | 42 ---------------- man/jjcsformat_range_fct.Rd | 42 ---------------- man/jjcsformat_xx.Rd | 98 ++++++++++++++++++++++++++++++++++--- 7 files changed, 131 insertions(+), 201 deletions(-) delete mode 100644 man/format_xx_fct.Rd delete mode 100644 man/fraction_count_denom.Rd delete mode 100644 man/jjcsformat_pval_fct.Rd delete mode 100644 man/jjcsformat_range_fct.Rd diff --git a/NAMESPACE b/NAMESPACE index 8790074a..64a51cf8 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -42,7 +42,6 @@ export(find_missing_chg_after_avisit) export(fit_ancova) export(fit_mmrm_j) export(format_stats) -export(format_xx_fct) export(get_mmrm_lsmeans) export(get_ref_info) export(get_titles_from_file) @@ -55,6 +54,7 @@ export(inches_to_spaces) export(insert_blank_line) export(jj_complex_scorefun) export(jjcs_num_formats) +export(jjcsformat_cnt_den_fract_fct) export(jjcsformat_count_denom_fraction) export(jjcsformat_count_fraction) export(jjcsformat_fraction_count_denom) diff --git a/man/count_fraction.Rd b/man/count_fraction.Rd index 3a9aba9c..f24a603a 100644 --- a/man/count_fraction.Rd +++ b/man/count_fraction.Rd @@ -1,28 +1,34 @@ 
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/jjcsformats.R -\name{count_fraction} -\alias{count_fraction} +\name{count and fraction related formatting functions} +\alias{count and fraction related formatting functions} +\alias{jjcsformat_cnt_den_fract_fct} \alias{jjcsformat_count_fraction} -\title{Formatting count and fraction values} +\alias{jjcsformat_count_denom_fraction} +\alias{jjcsformat_fraction_count_denom} +\title{Formatting functions for count and fraction, and for count denominator and fraction values} \usage{ -jjcsformat_count_fraction(x, d = 1, roundmethod = c("sas", "iec"), ...) +jjcsformat_cnt_den_fract_fct( + d = 1, + type = c("count_fraction", "count_denom_fraction", "fraction_count_denom"), + verbose = FALSE +) + +jjcsformat_count_fraction(x, round_type = c("sas", "iec"), output, ...) + +jjcsformat_count_denom_fraction(x, round_type = c("sas", "iec"), output, ...) + +jjcsformat_fraction_count_denom(x, round_type = c("sas", "iec"), output, ...) } \arguments{ -\item{x}{(\verb{numeric vector})\cr Vector with elements \code{num} and \code{fraction} or \code{num}, \code{denom} and \code{fraction}.} - \item{d}{(\code{numeric(1)})\cr Number of digits to round fraction to (default = 1)} -\item{roundmethod}{(\code{string})\cr choice of rounding methods. Options are: -\itemize{ -\item \code{sas}: the underlying rounding method is \code{tidytlg::roundSAS}, where \cr -roundSAS comes from this Stack Overflow post https://stackoverflow.com/questions/12688717/round-up-from-5 -\item \code{iec}: the underlying rounding method is \code{round} -}} +\item{x}{(\verb{numeric vector})\cr Vector with elements \code{num} and \code{fraction} or \code{num}, \code{denom} and \code{fraction}.} \item{...}{Additional arguments passed to other methods.} } \value{ -A string in the format \verb{count / denom (ratio percent)}. If \code{count} +A formatting function to format input into string in the format \verb{count / denom (ratio percent)}. 
If \code{count} is 0, the format is \code{0}. If fraction is >0.99, the format is \verb{count / denom (>99.9 percent)} } @@ -32,14 +38,27 @@ consideration when count is 0, or fraction is 1. \cr See also: \code{\link[tern:format_count_fraction_fixed_dp]{tern::format_count_fraction_fixed_dp()}} } \examples{ + jjcsformat_count_fraction(c(7, 0.7)) -jjcsformat_count_fraction(c(70000, 0.9999999)) -jjcsformat_count_fraction(c(70000, 1)) +jjcsformat_count_fraction(c(70000, 70000 / 70001)) +jjcsformat_count_fraction(c(235, 235 / 235)) +fmt <- jjcsformat_cnt_den_fract_fct(type = "count_fraction", d = 2) +fmt(c(23, 23 / 235)) + +jjcsformat_count_denom_fraction(c(7, 10, 0.7)) +jjcsformat_count_denom_fraction(c(70000, 70001, 70000 / 70001)) +jjcsformat_count_denom_fraction(c(235, 235, 235 / 235)) +fmt <- jjcsformat_cnt_den_fract_fct(type = "count_denom_fraction", d = 2) +fmt(c(23, 235, 23 / 235)) + +jjcsformat_fraction_count_denom(c(7, 10, 0.7)) +jjcsformat_fraction_count_denom(c(70000, 70001, 70000 / 70001)) +jjcsformat_fraction_count_denom(c(235, 235, 235 / 235)) +fmt <- jjcsformat_cnt_den_fract_fct(type = "fraction_count_denom", d = 2) +fmt(c(23, 235, 23 / 235)) } \seealso{ -Other JJCS formats: -\code{\link{format_xx_fct}()}, -\code{\link{jjcsformat_pval_fct}()}, -\code{\link{jjcsformat_range_fct}()} +Other JJCS formatting functions: +\code{\link{jjcsformat_xx}()} } -\concept{JJCS formats} +\concept{JJCS formatting functions} diff --git a/man/format_xx_fct.Rd b/man/format_xx_fct.Rd deleted file mode 100644 index 5020e06c..00000000 --- a/man/format_xx_fct.Rd +++ /dev/null @@ -1,48 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jjcsformats.R -\name{format_xx_fct} -\alias{format_xx_fct} -\title{Function factory for xx style formatting} -\usage{ -format_xx_fct( - roundmethod = c("sas", "iec"), - na_str_dflt = "NE", - replace_na_dflt = TRUE -) -} -\arguments{ -\item{roundmethod}{(\code{string})\cr choice of rounding methods. 
Options are: -\itemize{ -\item \code{sas}: the underlying rounding method is \code{tidytlg::roundSAS}, where \cr -roundSAS comes from this Stack Overflow post https://stackoverflow.com/questions/12688717/round-up-from-5 -\item \code{iec}: the underlying rounding method is \code{round} -}} - -\item{na_str_dflt}{(\code{character})\cr Character to represent NA value} - -\item{replace_na_dflt}{(\code{logical(1)})\cr Should an \code{na_string} of "NA" within -the formatters framework be overridden by \code{na_str_default}? Defaults to -\code{TRUE}, as a way to have a different default na string behavior from the -base \code{formatters} framework.} -} -\value{ -\code{format_xx_fct()} format function that can be used in rtables formatting calls -} -\description{ -A function factory to generate formatting functions for value -formatting that support the xx style format and control the rounding method. -} -\examples{ -jjcsformat_xx_SAS <- format_xx_fct(roundmethod = "sas") -jjcsformat_xx <- jjcsformat_xx_SAS -rcell(c(1.453), jjcsformat_xx("xx.xx")) -rcell(c(), jjcsformat_xx("xx.xx")) -rcell(c(1.453, 2.45638), jjcsformat_xx("xx.xx (xx.xxx)")) -} -\seealso{ -Other JJCS formats: -\code{\link{count_fraction}}, -\code{\link{jjcsformat_pval_fct}()}, -\code{\link{jjcsformat_range_fct}()} -} -\concept{JJCS formats} diff --git a/man/fraction_count_denom.Rd b/man/fraction_count_denom.Rd deleted file mode 100644 index 9cd2084c..00000000 --- a/man/fraction_count_denom.Rd +++ /dev/null @@ -1,41 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jjcsformats.R -\name{jjcsformat_fraction_count_denom} -\alias{jjcsformat_fraction_count_denom} -\title{Formatting fraction, count and denominator values.} -\usage{ -jjcsformat_fraction_count_denom(x, d = 1, roundmethod = c("sas", "iec"), ...) 
-} -\arguments{ -\item{x}{(\verb{numeric vector})\cr Vector with elements \code{num} and \code{fraction} or \code{num}, \code{denom} and \code{fraction}.} - -\item{d}{(\code{numeric(1)})\cr Number of digits to round fraction to (default = 1)} - -\item{roundmethod}{(\code{string})\cr choice of rounding methods. Options are: -\itemize{ -\item \code{sas}: the underlying rounding method is \code{tidytlg::roundSAS}, where \cr -roundSAS comes from this Stack Overflow post https://stackoverflow.com/questions/12688717/round-up-from-5 -\item \code{iec}: the underlying rounding method is \code{round} -}} - -\item{...}{Additional arguments passed to other methods.} -} -\value{ -\code{x} formatted as a string with \code{d} digits of precision, -with special cased values as described in Details above. -} -\description{ -Formatting fraction, count and denominator values. -} -\details{ -Formats a 3-dimensional value such that percent values -near 0 or 100\% are formatted as .e.g, \code{"<0.1\%"} and -\code{">99.9\%"}, where the cutoff is controlled by \code{d}, and -formatted as \code{"xx.x\% (xx/xx)"} otherwise, with the -precision of the percent also controlled by \code{d}. -} -\examples{ -jjcsformat_fraction_count_denom(c(7, 10, 0.7)) -jjcsformat_fraction_count_denom(c(70000, 70001, 70000 / 70001)) -jjcsformat_fraction_count_denom(c(235, 235, 235 / 235)) -} diff --git a/man/jjcsformat_pval_fct.Rd b/man/jjcsformat_pval_fct.Rd deleted file mode 100644 index d37949e6..00000000 --- a/man/jjcsformat_pval_fct.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jjcsformats.R -\name{jjcsformat_pval_fct} -\alias{jjcsformat_pval_fct} -\title{Function factory for p-value formatting} -\usage{ -jjcsformat_pval_fct(alpha = 0.05) -} -\arguments{ -\item{alpha}{(\code{numeric})\cr the significance level to account for during rounding.} -} -\value{ -The p-value in the standard format. If \code{count} is 0, the format is \code{0}. 
-If it is smaller than 0.001, then \verb{<0.001}, if it is larger than 0.999, then -\verb{>0.999} is returned. Otherwise, 3 digits are used. In the special case that -rounding from below would make the string equal to the specified \code{alpha}, -then a higher number of digits is used to be able to still see the difference. -For example, 0.0048 is not rounded to 0.005 but stays at 0.0048 if \code{alpha = 0.005} -is set. -} -\description{ -A function factory to generate formatting functions for p-value -formatting that support rounding close to the significance level specified. -} -\examples{ -my_pval_format <- jjcsformat_pval_fct(0.005) -my_pval_format(0.2802359) -my_pval_format(0.0048) -my_pval_format(0.00499) -my_pval_format(0.004999999) -my_pval_format(0.0051) -my_pval_format(0.0009) -my_pval_format(0.9991) - -} -\seealso{ -Other JJCS formats: -\code{\link{count_fraction}}, -\code{\link{format_xx_fct}()}, -\code{\link{jjcsformat_range_fct}()} -} -\concept{JJCS formats} diff --git a/man/jjcsformat_range_fct.Rd b/man/jjcsformat_range_fct.Rd deleted file mode 100644 index 10cc0927..00000000 --- a/man/jjcsformat_range_fct.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jjcsformats.R -\name{jjcsformat_range_fct} -\alias{jjcsformat_range_fct} -\title{Function factory for range with censoring information formatting} -\usage{ -jjcsformat_range_fct(str) -} -\arguments{ -\item{str}{(\code{string})\cr the format specifying the number of digits to be used, -for the range values, e.g. \code{"xx.xx"}.} -} -\value{ -A function that formats a numeric vector with 4 elements: -\itemize{ -\item minimum -\item maximum -\item censored minimum? (1 if censored, 0 if event) -\item censored maximum? 
(1 if censored, 0 if event) -The range along with the censoring information is returned as a string -with the specified numeric format as \verb{(min, max)}, and the \code{+} is appended -to \code{min} or \code{max} if these have been censored. -} -} -\description{ -A function factory to generate formatting functions for range formatting -that includes information about the censoring of survival times. -} -\examples{ -my_range_format <- jjcsformat_range_fct("xx.xx") -my_range_format(c(0.35235, 99.2342, 1, 0)) -my_range_format(c(0.35235, 99.2342, 0, 1)) -my_range_format(c(0.35235, 99.2342, 0, 0)) -my_range_format(c(0.35235, 99.2342, 1, 1)) -} -\seealso{ -Other JJCS formats: -\code{\link{count_fraction}}, -\code{\link{format_xx_fct}()}, -\code{\link{jjcsformat_pval_fct}()} -} -\concept{JJCS formats} diff --git a/man/jjcsformat_xx.Rd b/man/jjcsformat_xx.Rd index 87465f34..99c44b8e 100644 --- a/man/jjcsformat_xx.Rd +++ b/man/jjcsformat_xx.Rd @@ -2,19 +2,103 @@ % Please edit documentation in R/jjcsformats.R \name{jjcsformat_xx} \alias{jjcsformat_xx} -\title{Formatting of values} +\alias{jjcsformat_pval_fct} +\alias{jjcsformat_range_fct} +\title{Utility for specifying custom formats} \usage{ -jjcsformat_xx(str, na_str = na_str_dflt) +jjcsformat_xx( + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE +) + +jjcsformat_pval_fct(alpha = 0.05) + +jjcsformat_range_fct(str) } \arguments{ -\item{str}{(\code{character})\cr the formatting that is required specified as a text string, eg "xx.xx"} +\item{str}{(\code{string})\cr the format specifying the number of digits to be used, +for the range values, e.g. \code{"xx.xx"}.} + +\item{na_str_dflt}{Character to represent NA value} + +\item{replace_na_dflt}{logical(1). Should an \code{na_string} of "NA" within +the formatters framework be overridden by \code{na_str_default}? 
Defaults to +\code{TRUE}, as a way to have a different default na string behavior from the +base \code{formatters} framework.} -\item{na_str}{(\code{character})\cr Na string that will be passed from \code{formatters} into -our formatting functions.} +\item{alpha}{(\code{numeric})\cr the significance level to account for during rounding.} } \value{ -a formatting function with \code{"sas"}-style rounding. +Either a supported format string, or a formatting function that can be +used as format in \code{formatters::format_value} + +The p-value in the standard format. If \code{count} is 0, the format is \code{0}. +If it is smaller than 0.001, then \verb{<0.001}, if it is larger than 0.999, then +\verb{>0.999} is returned. Otherwise, 3 digits are used. In the special case that +rounding from below would make the string equal to the specified \code{alpha}, +then a higher number of digits is used to be able to still see the difference. +For example, 0.0048 is not rounded to 0.005 but stays at 0.0048 if \code{alpha = 0.005} +is set. + +A function that formats a numeric vector with 4 elements: +\itemize{ +\item minimum +\item maximum +\item censored minimum? (1 if censored, 0 if event) +\item censored maximum? (1 if censored, 0 if event) +The range along with the censoring information is returned as a string +with the specified numeric format as \verb{(min, max)}, and the \code{+} is appended +to \code{min} or \code{max} if these have been censored. +} } \description{ -jjcs formatting function +Utility for specifying custom formats that can be used as a format in \code{formatters::format_value} + +A function factory to generate formatting functions for p-value +formatting that support rounding close to the significance level specified. + +A function factory to generate formatting functions for range formatting +that includes information about the censoring of survival times. 
+} +\examples{ +value <- c(1.65, 8.645) +fmt <- jjcsformat_xx("xx.x") +is.function(fmt) +fmt +format_value(value[1], fmt, round_type = "sas") +format_value(value[1], fmt, round_type = "iec") +fmt(value[1]) + +fmt2 <- jjcsformat_xx("xx.x (xx.xxx)") +is.function(fmt2) +value <- c(1.65, 8.645) +format_value(value, fmt2, round_type = "sas") +format_value(value, fmt2, round_type = "iec") +#only possible when resulting format is a function +fmt2(value, round_type = "sas") + +value <- c(1.65, NA) +format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) +fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) +my_pval_format <- jjcsformat_pval_fct(0.005) +my_pval_format(0.2802359) +my_pval_format(0.0048) +my_pval_format(0.00499) +my_pval_format(0.004999999) +my_pval_format(0.0051) +my_pval_format(0.0009) +my_pval_format(0.9991) + +my_range_format <- jjcsformat_range_fct("xx.xx") +my_range_format(c(0.35235, 99.2342, 1, 0)) +my_range_format(c(0.35235, 99.2342, 0, 1)) +my_range_format(c(0.35235, 99.2342, 0, 0)) +my_range_format(c(0.35235, 99.2342, 1, 1)) +} +\seealso{ +Other JJCS formatting functions: +\code{\link{count and fraction related formatting functions}} } +\concept{JJCS formatting functions} From 094f9fca7ebca11fe9e5ddffbfa4a3f1443e6466 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 13:01:36 +0000 Subject: [PATCH 05/49] round_type io roundmethod --- R/column_stats.R | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/R/column_stats.R b/R/column_stats.R index 02cfffaf..a2dbb9a7 100644 --- a/R/column_stats.R +++ b/R/column_stats.R @@ -1,13 +1,13 @@ -calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, roundmethod = c("sas", "iec"), exclude_visits, +calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = c("sas", "iec"), exclude_visits, var_names = c("AVAL", "CHG", "BASE")) { - roundmethod <- match.arg(roundmethod) + round_type <- match.arg(round_type) if (is.na(decimal)) { decimal 
<- 0 } if ((varnm == var_names[2] || varnm == var_names[3]) && (visit %in% exclude_visits)) { return(NULL) } - if (roundmethod == "sas") { + if (round_type == "sas") { switch(statnm, N = length(stats::na.omit(datvec)), SE = format( From 32b15c60f9e08cc9e1c43d455b0974731e0b4dd3 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 13:15:15 +0000 Subject: [PATCH 06/49] extra test snapshot --- ...unco_utils_default_stats_formats_labels.md | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md diff --git a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md new file mode 100644 index 00000000..8adf7c39 --- /dev/null +++ b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md @@ -0,0 +1,96 @@ +# get_stats works as expected + + Code + res + Output + [1] "quantiles_lower" "median_ci_3d" "quantiles_upper" + [4] "range_with_cens_info" + +# get_formats_from_stats works as expected + + Code + res + Output + $quantiles_upper + function( + x, + output = c("ascii", "html"), + round_type = c("sas", "iec"), + #na_str = na_str_dflt + na_str) { + # if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { + # na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) + # na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] + # } + if (length(x) == 0 || isTRUE(all(x == ""))) { + return(NULL) + } else if (!length(positions[[1]]) == length(x)) { + stop( + "Error: input str in call to jjcsformat_xx must contain same number of xx as the number of stats." 
+ ) + } + + round_type <- match.arg(round_type) + if (verbose) print(paste0("round_type in rtable_format: ", round_type)) + + values <- Map(y = x, fun = roundings, na_str = na_str, output = output, function(y, fun, na_str, output) + fun(y, na_str = na_str, round_type = round_type, output = output)) + + regmatches(x = str, m = positions)[[1]] <- values + return(str) + } + + + $range_with_cens_info + function(x, output, round_type = c("iec", "sas"), ...) { + round_type <- match.arg(round_type) + checkmate::assert_numeric( + x, + len = 4L, + finite = TRUE, + any.missing = FALSE + ) + checkmate::assert_true(all(x[c(3, 4)] %in% c(0, 1))) + + res <- vapply(x[c(1, 2)], FUN = function(x){ + format_value(x, format_xx, round_type = round_type) + }, character(1)) + if (x[3] == 1) res[1] <- paste0(res[1], "+") + if (x[4] == 1) res[2] <- paste0(res[2], "+") + paste0("(", res[1], ", ", res[2], ")") + } + + + +# get_labels_from_stats works as expected + + Code + res + Output + $quantiles_upper + [1] "75%-ile (95% CI)" + + $range_with_cens_info + [1] "Min, max" + + +# get_label_attr_from_stats works as expected + + Code + res + Output + stats1 stats2 + "bla" "boo" + +# get_indents_from_stats works as expected + + Code + res + Output + $quantiles_upper + [1] 0 + + $range_with_cens_info + [1] 0 + + From 280656c1eb6cf51466f6ee990cea18b4e5c9c133 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 14:27:39 +0000 Subject: [PATCH 07/49] updates to some junco default formats + impacted tests --- R/junco_utils_default_stats_formats_labels.R | 13 ++++++++++++- ...junco_utils_default_stats_formats_labels.md | 18 ++++++++---------- tests/testthat/_snaps/summarize_ancova.md | 2 +- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/R/junco_utils_default_stats_formats_labels.R b/R/junco_utils_default_stats_formats_labels.R index 892911f3..65016e81 100644 --- a/R/junco_utils_default_stats_formats_labels.R +++ b/R/junco_utils_default_stats_formats_labels.R @@ -254,6 +254,7 
@@ junco_default_formats_start <- c( adj_mean_ci = jjcsformat_xx("(xx.xxx, xx.xxx)"), adj_mean_est_ci = jjcsformat_xx("xx.xxx (xx.xxx, xx.xxx)"), change = "xx.x%", + cv = jjcsformat_xx("xx.xx"), diff = jjcsformat_xx("xx.x"), diff_ci = jjcsformat_xx("(xx.x, xx.x)"), diff_est_ci = jjcsformat_xx("xx.x (xx.x, xx.x)"), @@ -262,6 +263,10 @@ junco_default_formats_start <- c( diff_mean_est_ci = jjcsformat_xx("xx.xxx (xx.xxx, xx.xxx)"), event_free_ci = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), event_free_rate = jjcsformat_xx("xx.xx"), + geom_sd = jjcsformat_xx("xx.xxx"), + geom_se = jjcsformat_xx("xx.xxx"), + geom_mean_sd = jjcsformat_xx("xx.xx (xx.xxx)"), + geom_mean_se = jjcsformat_xx("xx.xx (xx.xxx)"), hr = jjcsformat_xx("xx.xx"), hr_ci = jjcsformat_xx("(xx.xx, xx.xx)"), hr_ci_3d = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), @@ -272,10 +277,14 @@ junco_default_formats_start <- c( lsmean_diffci = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), lsmean_diff_ci = jjcsformat_xx("(xx.xx, xx.xx)"), lsmean_se = jjcsformat_xx("xx.xx (xx.xx)"), + mean = jjcsformat_xx("xx.xx"), mean_sd = jjcsformat_xx("xx.xx (xx.xxx)"), + mean_se = jjcsformat_xx("xx.xx (xx.xxx)"), + mean_pval = jjcsformat_pval_fct(0), median = jjcsformat_xx("xx.xx"), median_ci = jjcsformat_xx("(xx.xx, xx.xx)"), median_ci_3d = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), + median_range = jjcsformat_xx("xx.xx (xx.x, xx.x)"), n = jjcsformat_xx("xx."), n_tot = jjcsformat_xx("xx."), n_tot_events = jjcsformat_xx("xx."), @@ -285,12 +294,14 @@ junco_default_formats_start <- c( pvalue = jjcsformat_pval_fct(0), p_value = jjcsformat_pval_fct(0), quantiles = jjcsformat_xx("xx.xx, xx.xx"), - range = jjcsformat_xx("xx.xx, xx.xx"), + range = jjcsformat_xx("xx.x, xx.x"), range_with_cens_info = jjcsformat_range_fct("xx.xx"), rate_ci = jjcsformat_xx("(xx.xx, xx.xx)"), rate_se = jjcsformat_xx("xx.xx"), rel_risk_ci = jjcsformat_xx("xx.xx (xx.xx - xx.xx)"), quantiles_upper = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), + sd = jjcsformat_xx("xx.xxx"), + se = 
jjcsformat_xx("xx.xxx"), n_altdf = "xx", n_df = "xx", n_rowdf = "xx", diff --git a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md index 8adf7c39..7ef632b7 100644 --- a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md +++ b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md @@ -14,14 +14,13 @@ $quantiles_upper function( x, - output = c("ascii", "html"), + output, round_type = c("sas", "iec"), - #na_str = na_str_dflt - na_str) { - # if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { - # na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) - # na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] - # } + na_str = na_str_dflt) { + if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { + na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) + na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] + } if (length(x) == 0 || isTRUE(all(x == ""))) { return(NULL) } else if (!length(positions[[1]]) == length(x)) { @@ -31,10 +30,9 @@ } round_type <- match.arg(round_type) - if (verbose) print(paste0("round_type in rtable_format: ", round_type)) - values <- Map(y = x, fun = roundings, na_str = na_str, output = output, function(y, fun, na_str, output) - fun(y, na_str = na_str, round_type = round_type, output = output)) + values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str, output) + fun(y, na_str = na_str, round_type = round_type)) regmatches(x = str, m = positions)[[1]] <- values return(str) diff --git a/tests/testthat/_snaps/summarize_ancova.md b/tests/testthat/_snaps/summarize_ancova.md index 915adf41..5e2a0e28 100644 --- a/tests/testthat/_snaps/summarize_ancova.md +++ b/tests/testthat/_snaps/summarize_ancova.md @@ -48,7 +48,7 @@ n 50 50 50 Mean (SD) 1.46 (0.174) 4.26 (0.470) 5.55 (0.552) Median 1.50 4.35 5.55 - Min, max 1.00, 1.90 3.00, 
5.10 4.50, 6.90 + Min, max 1.0, 1.9 3.0, 5.1 4.5, 6.9 25% and 75%-ile 1.40, 1.60 4.00, 4.60 5.10, 5.90 Difference in Adjusted Means (95% CI) 2.80 (2.63, 2.97) 4.09 (3.92, 4.26) p-value <0.001 <0.001 From 29366ce5745fff436005df1c96ab106b010c4fab Mon Sep 17 00:00:00 2001 From: iaugusty Date: Mon, 8 Sep 2025 19:34:15 +0000 Subject: [PATCH 08/49] round_type should be passed to paginate_to_mpfs as well --- R/tt_to_tblfile.R | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 7939c654..32bbe705 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -359,6 +359,7 @@ tt_to_tlgrtf <- function( } else { hrdmpf <- tt } + round_type <- get_round_type(tt) pags <- paginate_to_mpfs( tt, fontspec = fontspec, @@ -370,7 +371,8 @@ tt_to_tlgrtf <- function( margins = margins, lpp = NULL, nosplitin = nosplitin, - verbose = verbose + verbose = verbose, + round_type = round_type ) ## if (has_force_pag(tt)) { nslices <- which( From 98d777438daf51e1a6e98808b58bb515e54c3b37 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 9 Sep 2025 07:14:21 +0000 Subject: [PATCH 09/49] round_type as argument to tt_to_tlgrtf --- R/tt_to_tblfile.R | 12 +++++++++--- man/tt_to_tlgrtf.Rd | 6 ++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 32bbe705..0456e4ac 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -249,6 +249,10 @@ get_ncol <- function(tt) { #' and k is the number of lines the header takes up. See [tidytlg::add_bottom_borders] #' for what the matrix should contain. Users should only specify this when the #' default behavior does not meet their needs. +#' @param round_type (`"iec"` or `"sas"`)\cr the type of rounding to perform. iec, +#' the default, peforms rounding compliant with IEC 60559, while +#' sas performs nearest-value rounding consistent with rounding within SAS. +#' See `?formatters::format_value` for more details. 
#' @import rlistings #' @rdname tt_to_tlgrtf #' @export @@ -293,6 +297,7 @@ tt_to_tlgrtf <- function( combined_rtf = FALSE, one_table = TRUE, border_mat = make_header_bordmat(obj = tt), + round_type = get_round_type(tt), ...) { orientation <- match.arg(orientation) newdev <- open_font_dev(fontspec) @@ -353,13 +358,13 @@ tt_to_tlgrtf <- function( ) } if (methods::is(tt, "VTableTree")) { - hdrmpf <- matrix_form(tt[1, ]) + hdrmpf <- matrix_form(tt[1, ], round_type = round_type) } else if (methods::is(tt, "list") && methods::is(tt[[1]], "MatrixPrintForm")) { hdrmpf <- tt[[1]] } else { hrdmpf <- tt } - round_type <- get_round_type(tt) + pags <- paginate_to_mpfs( tt, fontspec = fontspec, @@ -540,7 +545,8 @@ tt_to_tlgrtf <- function( utils::head(tt, 1), indent_rownames = FALSE, expand_newlines = FALSE, - fontspec = fontspec + fontspec = fontspec, + round_type = round_type ) colinfo <- mpf_to_colspan( mpf, diff --git a/man/tt_to_tlgrtf.Rd b/man/tt_to_tlgrtf.Rd index e4870a6b..9dce82de 100644 --- a/man/tt_to_tlgrtf.Rd +++ b/man/tt_to_tlgrtf.Rd @@ -26,6 +26,7 @@ tt_to_tlgrtf( combined_rtf = FALSE, one_table = TRUE, border_mat = make_header_bordmat(obj = tt), + round_type = get_round_type(tt), ... ) } @@ -82,6 +83,11 @@ and k is the number of lines the header takes up. See \link[tidytlg:add_bottom_b for what the matrix should contain. Users should only specify this when the default behavior does not meet their needs.} +\item{round_type}{(\code{"iec"} or \code{"sas"})\cr the type of rounding to perform. iec, +the default, peforms rounding compliant with IEC 60559, while +sas performs nearest-value rounding consistent with rounding within SAS. 
+See \code{?formatters::format_value} for more details.} + \item{...}{Additional arguments passed to gentlg} } \value{ From 518112255678b8a5f3c3ddc6fdc147a784cf7f72 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 13 Sep 2025 14:06:47 +0000 Subject: [PATCH 10/49] [skip style] [skip vbump] Restyle files --- R/jjcsformats.R | 189 ++++++++++--------- R/junco_utils_default_stats_formats_labels.R | 14 +- R/tt_to_tblfile.R | 2 +- tests/testthat/test-jjcsformats.R | 11 +- 4 files changed, 110 insertions(+), 106 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 73c6985b..f4e8f416 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -11,7 +11,7 @@ #' the formatters framework be overridden by `na_str_default`? Defaults to #' `TRUE`, as a way to have a different default na string behavior from the #' base `formatters` framework. -#' @return Either a supported format string, or a formatting function that can be +#' @return Either a supported format string, or a formatting function that can be #' used as format in `formatters::format_value` #' @family JJCS formatting functions #' @rdname jjcsformat_xx @@ -24,50 +24,50 @@ #' format_value(value[1], fmt, round_type = "sas") #' format_value(value[1], fmt, round_type = "iec") #' fmt(value[1]) -#' +#' #' fmt2 <- jjcsformat_xx("xx.x (xx.xxx)") #' is.function(fmt2) #' value <- c(1.65, 8.645) #' format_value(value, fmt2, round_type = "sas") -#' format_value(value, fmt2, round_type = "iec") -#' #only possible when resulting format is a function +#' format_value(value, fmt2, round_type = "iec") +#' # only possible when resulting format is a function #' fmt2(value, round_type = "sas") -#' +#' #' value <- c(1.65, NA) #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) #' fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) - - -jjcsformat_xx <- function(str, +jjcsformat_xx <- function( + str, na_str = na_str_dflt, na_str_dflt = "NE", 
replace_na_dflt = TRUE) { + if (grepl("xxx.", str, fixed = TRUE)) { + stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") + } - if (grepl("xxx.", str, fixed = TRUE)) { - stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") - } - - if (is_valid_format(str)) { - rtable_format <- str - } else { - + if (is_valid_format(str)) { + rtable_format <- str + } else { if (!grepl("xx", str, fixed = TRUE)) { stop("Error: jjcsformat_xx input str must contain xx.") } - positions <- gregexpr(pattern = "xx\\.?x*", - text = str, - perl = TRUE) + positions <- gregexpr( + pattern = "xx\\.?x*", + text = str, + perl = TRUE + ) x_positions <- regmatches(x = str, m = positions)[[1]] - + roundings <- lapply(X = x_positions, function(fmt) { rounding <- function(x, na_str, round_type) { if (fmt %in% formatters:::formats_1d) { - res <- format_value(x, - fmt, - na_str = na_str[1], - round_type = round_type) + res <- format_value(x, + fmt, + na_str = na_str[1], + round_type = round_type + ) } else if (fmt %in% c("xx.xxxxx", "xx.xxxxxx", "xx.xxxxxxx")) { # p-value fmt sometimes might need more digits d <- nchar(sub(".*\\.", "", fmt)) @@ -77,37 +77,36 @@ jjcsformat_xx <- function(str, } return(rounding) }) - - rtable_format <- - function( - x, - output, - round_type = c("sas", "iec"), - na_str = na_str_dflt) { - if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { - na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) - na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] - } - if (length(x) == 0 || isTRUE(all(x == ""))) { - return(NULL) - } else if (!length(positions[[1]]) == length(x)) { - stop( - "Error: input str in call to jjcsformat_xx must contain same number of xx as the number of stats." 
- ) + + rtable_format <- + function(x, + output, + round_type = c("sas", "iec"), + na_str = na_str_dflt) { + if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { + na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) + na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] + } + if (length(x) == 0 || isTRUE(all(x == ""))) { + return(NULL) + } else if (!length(positions[[1]]) == length(x)) { + stop( + "Error: input str in call to jjcsformat_xx must contain same number of xx as the number of stats." + ) + } + + round_type <- match.arg(round_type) + + values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str, output) { + fun(y, na_str = na_str, round_type = round_type) + }) + + regmatches(x = str, m = positions)[[1]] <- values + return(str) } - - round_type <- match.arg(round_type) - - values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str, output) - fun(y, na_str = na_str, round_type = round_type)) - - regmatches(x = str, m = positions)[[1]] <- values - return(str) - } - + return(rtable_format) - } - + } } @@ -133,59 +132,63 @@ jjcsformat_xx <- function(str, #' @export jjcsformat_cnt_den_fract_fct <- function(d = 1, - type = c("count_fraction", "count_denom_fraction", "fraction_count_denom"), - verbose = FALSE - ){ + type = c("count_fraction", "count_denom_fraction", "fraction_count_denom"), + verbose = FALSE) { type <- match.arg(type) - + function( - x, - round_type = c("sas", "iec"), - output, - ...) { + x, + round_type = c("sas", "iec"), + output, + ...) 
{ attr(x, "label") <- NULL if (any(is.na(x))) { return("-") } - + round_type <- match.arg(round_type) - + checkmate::assert_vector(x) count <- x[1] checkmate::assert_integerish(count) - + fraction <- switch(type, - "count_fraction" = x[2], - "count_denom_fraction" = x[3], - "fraction_count_denom" = x[3]) - - + "count_fraction" = x[2], + "count_denom_fraction" = x[3], + "fraction_count_denom" = x[3] + ) + + assert_proportion_value( fraction, include_boundaries = TRUE ) - + if (isTRUE(all.equal(fraction, 1))) fraction <- 1 - + if (type == "count_fraction") fmt_cd <- format_value(x = count, format = "xx") if (type %in% c("count_denom_fraction", "fraction_count_denom")) { denom <- x[2] checkmate::assert_integerish(denom) - fmt_cd <- paste0(count, "/", denom) + fmt_cd <- paste0(count, "/", denom) } - + if (verbose) print(paste0("round_type used: ", round_type)) - - fmtpct <- format_value(100*fraction, - format = paste0("xx.", strrep("x", times = d)), - output = "ascii", - round_type = round_type) - - - if (type %in% c("count_fraction", "count_denom_fraction")){ + + fmtpct <- format_value(100 * fraction, + format = paste0("xx.", strrep("x", times = d)), + output = "ascii", + round_type = round_type + ) + + + if (type %in% c("count_fraction", "count_denom_fraction")) { result <- if (count == 0) { - if (type == "count_fraction") "0" - else paste0(fmt_cd, " (", fmtpct, "%)") + if (type == "count_fraction") { + "0" + } else { + paste0(fmt_cd, " (", fmtpct, "%)") + } } else if (fraction == 1) { ## per conventions still report as 100.0% paste0(fmt_cd, " (100.0%)") @@ -221,17 +224,17 @@ jjcsformat_cnt_den_fract_fct <- function(d = 1, paste0(">", 100 - 10**(-d), "% (", fmt_cd, ")") } else { paste0(fmtpct, "% (", fmt_cd, ")") - } + } } - - return(result) + + return(result) } } #' @rdname count_fraction #' @export #' @examples -#' +#' #' jjcsformat_count_fraction(c(7, 0.7)) #' jjcsformat_count_fraction(c(70000, 70000 / 70001)) #' jjcsformat_count_fraction(c(235, 235 / 235)) @@ 
-242,7 +245,7 @@ jjcsformat_count_fraction <- jjcsformat_cnt_den_fract_fct(type = "count_fraction #' @rdname count_fraction #' @export #' @examples -#' +#' #' jjcsformat_count_denom_fraction(c(7, 10, 0.7)) #' jjcsformat_count_denom_fraction(c(70000, 70001, 70000 / 70001)) #' jjcsformat_count_denom_fraction(c(235, 235, 235 / 235)) @@ -253,12 +256,12 @@ jjcsformat_count_denom_fraction <- jjcsformat_cnt_den_fract_fct(type = "count_de #' @rdname count_fraction #' @export #' @examples -#' +#' #' jjcsformat_fraction_count_denom(c(7, 10, 0.7)) #' jjcsformat_fraction_count_denom(c(70000, 70001, 70000 / 70001)) #' jjcsformat_fraction_count_denom(c(235, 235, 235 / 235)) #' fmt <- jjcsformat_cnt_den_fract_fct(type = "fraction_count_denom", d = 2) -#' fmt(c(23, 235, 23 / 235)) +#' fmt(c(23, 235, 23 / 235)) jjcsformat_fraction_count_denom <- jjcsformat_cnt_den_fract_fct(type = "fraction_count_denom") @@ -304,8 +307,8 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { xx_format <- "xx.xxx" if (!is.na(x) && alpha < 0.001 && alpha > 0) { stop("jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001.") - } - + } + if (is.na(x)) { format_value(x, jjcsformat_xx(xx_format), na_str = na_str) } else if (x < 0.001) { @@ -361,7 +364,7 @@ jjcsformat_range_fct <- function(str) { ) checkmate::assert_true(all(x[c(3, 4)] %in% c(0, 1))) - res <- vapply(x[c(1, 2)], FUN = function(x){ + res <- vapply(x[c(1, 2)], FUN = function(x) { format_value(x, format_xx, round_type = round_type) }, character(1)) if (x[3] == 1) res[1] <- paste0(res[1], "+") diff --git a/R/junco_utils_default_stats_formats_labels.R b/R/junco_utils_default_stats_formats_labels.R index 65016e81..1f1035c6 100644 --- a/R/junco_utils_default_stats_formats_labels.R +++ b/R/junco_utils_default_stats_formats_labels.R @@ -263,10 +263,10 @@ junco_default_formats_start <- c( diff_mean_est_ci = jjcsformat_xx("xx.xxx (xx.xxx, xx.xxx)"), event_free_ci = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), event_free_rate = 
jjcsformat_xx("xx.xx"), - geom_sd = jjcsformat_xx("xx.xxx"), - geom_se = jjcsformat_xx("xx.xxx"), - geom_mean_sd = jjcsformat_xx("xx.xx (xx.xxx)"), - geom_mean_se = jjcsformat_xx("xx.xx (xx.xxx)"), + geom_sd = jjcsformat_xx("xx.xxx"), + geom_se = jjcsformat_xx("xx.xxx"), + geom_mean_sd = jjcsformat_xx("xx.xx (xx.xxx)"), + geom_mean_se = jjcsformat_xx("xx.xx (xx.xxx)"), hr = jjcsformat_xx("xx.xx"), hr_ci = jjcsformat_xx("(xx.xx, xx.xx)"), hr_ci_3d = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), @@ -284,7 +284,7 @@ junco_default_formats_start <- c( median = jjcsformat_xx("xx.xx"), median_ci = jjcsformat_xx("(xx.xx, xx.xx)"), median_ci_3d = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), - median_range = jjcsformat_xx("xx.xx (xx.x, xx.x)"), + median_range = jjcsformat_xx("xx.xx (xx.x, xx.x)"), n = jjcsformat_xx("xx."), n_tot = jjcsformat_xx("xx."), n_tot_events = jjcsformat_xx("xx."), @@ -300,8 +300,8 @@ junco_default_formats_start <- c( rate_se = jjcsformat_xx("xx.xx"), rel_risk_ci = jjcsformat_xx("xx.xx (xx.xx - xx.xx)"), quantiles_upper = jjcsformat_xx("xx.xx (xx.xx, xx.xx)"), - sd = jjcsformat_xx("xx.xxx"), - se = jjcsformat_xx("xx.xxx"), + sd = jjcsformat_xx("xx.xxx"), + se = jjcsformat_xx("xx.xxx"), n_altdf = "xx", n_df = "xx", n_rowdf = "xx", diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 0456e4ac..d17bd24d 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -364,7 +364,7 @@ tt_to_tlgrtf <- function( } else { hrdmpf <- tt } - + pags <- paginate_to_mpfs( tt, fontspec = fontspec, diff --git a/tests/testthat/test-jjcsformats.R b/tests/testthat/test-jjcsformats.R index 6cc6e210..fdd95cb8 100644 --- a/tests/testthat/test-jjcsformats.R +++ b/tests/testthat/test-jjcsformats.R @@ -181,13 +181,15 @@ test_that("jjcsformat_pval_fct works", { }) }) -test_that("some special cases for jjcsformat_pval_fct",{ +test_that("some special cases for jjcsformat_pval_fct", { expect_identical( format_value(NA_real_, format = jjcsformat_pval_fct(0), na_str = "NE"), - "NE") + "NE" + ) 
expect_identical( format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), - "NE") + "NE" + ) expect_error( format_value(0.00000123, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." @@ -195,8 +197,7 @@ test_that("some special cases for jjcsformat_pval_fct",{ expect_error( format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE")? "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." - ) - + ) }) test_that("jjcsformat_xx works also for empty cells", { From 9e5af1f033306924157b688446e2cf250e0eb6b3 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 13 Sep 2025 14:09:05 +0000 Subject: [PATCH 11/49] [skip roxygen] [skip vbump] Roxygen Man Pages Auto Update --- DESCRIPTION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DESCRIPTION b/DESCRIPTION index eb8e18d8..d9d6451d 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -59,7 +59,7 @@ Imports: Encoding: UTF-8 Language: en-US Roxygen: list(markdown = TRUE) -RoxygenNote: 7.3.2 +RoxygenNote: 7.3.3 Suggests: knitr, rmarkdown, From 3be1847d36d0e2788b22bc6feaadb2257f26359c Mon Sep 17 00:00:00 2001 From: iaugusty Date: Fri, 7 Nov 2025 15:12:46 +0000 Subject: [PATCH 12/49] resolve test problems --- R/jjcsformats.R | 9 ++-- R/tt_to_tblfile.R | 7 ++- man/count_fraction.Rd | 2 +- man/jjcsformat_xx.Rd | 4 +- man/tt_to_tlgrtf.Rd | 2 +- ...unco_utils_default_stats_formats_labels.md | 50 +++++++++---------- 6 files changed, 39 insertions(+), 35 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index f4e8f416..80f32350 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -136,11 +136,10 @@ jjcsformat_cnt_den_fract_fct <- function(d = 1, verbose = FALSE) { type <- match.arg(type) - function( - x, - round_type = c("sas", "iec"), - output, - ...) { + function(x, + round_type = c("sas", "iec"), + output, + ...) 
{ attr(x, "label") <- NULL if (any(is.na(x))) { return("-") diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index d17bd24d..e07ced62 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -297,7 +297,7 @@ tt_to_tlgrtf <- function( combined_rtf = FALSE, one_table = TRUE, border_mat = make_header_bordmat(obj = tt), - round_type = get_round_type(tt), + round_type = NULL, ...) { orientation <- match.arg(orientation) newdev <- open_font_dev(fontspec) @@ -305,6 +305,9 @@ tt_to_tlgrtf <- function( on.exit(close_font_dev()) } + if (is.null(round_type) && tlgtype == "Table"){ + round_type <- round_type(tt) + } if (tlgtype == "Listing" && nrow(tt) == 0) { dat <- as.list(c("No data to report", rep("", ncol(tt) - 1))) names(dat) <- names(tt) @@ -434,6 +437,7 @@ tt_to_tlgrtf <- function( string_map = string_map, markup_df = markup_df, border_mat = pag_bord_mats[[i]], + round_type = round_type, ... ) } @@ -456,6 +460,7 @@ tt_to_tlgrtf <- function( colwidths = colwidths, ## this is largely ignored see note in docs # colwidths are already on the pags since they are mpfs border_mat = pag_bord_mats, + round_type = round_type, ... 
) } else if (!is.null(file)) { # only one page after pagination diff --git a/man/count_fraction.Rd b/man/count_fraction.Rd index f24a603a..75981369 100644 --- a/man/count_fraction.Rd +++ b/man/count_fraction.Rd @@ -55,7 +55,7 @@ jjcsformat_fraction_count_denom(c(7, 10, 0.7)) jjcsformat_fraction_count_denom(c(70000, 70001, 70000 / 70001)) jjcsformat_fraction_count_denom(c(235, 235, 235 / 235)) fmt <- jjcsformat_cnt_den_fract_fct(type = "fraction_count_denom", d = 2) -fmt(c(23, 235, 23 / 235)) +fmt(c(23, 235, 23 / 235)) } \seealso{ Other JJCS formatting functions: diff --git a/man/jjcsformat_xx.Rd b/man/jjcsformat_xx.Rd index 99c44b8e..3b6892bd 100644 --- a/man/jjcsformat_xx.Rd +++ b/man/jjcsformat_xx.Rd @@ -75,8 +75,8 @@ fmt2 <- jjcsformat_xx("xx.x (xx.xxx)") is.function(fmt2) value <- c(1.65, 8.645) format_value(value, fmt2, round_type = "sas") -format_value(value, fmt2, round_type = "iec") -#only possible when resulting format is a function +format_value(value, fmt2, round_type = "iec") +# only possible when resulting format is a function fmt2(value, round_type = "sas") value <- c(1.65, NA) diff --git a/man/tt_to_tlgrtf.Rd b/man/tt_to_tlgrtf.Rd index 9dce82de..7bf1fd73 100644 --- a/man/tt_to_tlgrtf.Rd +++ b/man/tt_to_tlgrtf.Rd @@ -26,7 +26,7 @@ tt_to_tlgrtf( combined_rtf = FALSE, one_table = TRUE, border_mat = make_header_bordmat(obj = tt), - round_type = get_round_type(tt), + round_type = NULL, ... 
) } diff --git a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md index 7ef632b7..e58135ab 100644 --- a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md +++ b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md @@ -12,31 +12,31 @@ res Output $quantiles_upper - function( - x, - output, - round_type = c("sas", "iec"), - na_str = na_str_dflt) { - if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { - na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) - na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] - } - if (length(x) == 0 || isTRUE(all(x == ""))) { - return(NULL) - } else if (!length(positions[[1]]) == length(x)) { - stop( - "Error: input str in call to jjcsformat_xx must contain same number of xx as the number of stats." - ) + function(x, + output, + round_type = c("sas", "iec"), + na_str = na_str_dflt) { + if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { + na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) + na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] + } + if (length(x) == 0 || isTRUE(all(x == ""))) { + return(NULL) + } else if (!length(positions[[1]]) == length(x)) { + stop( + "Error: input str in call to jjcsformat_xx must contain same number of xx as the number of stats." 
+ ) + } + + round_type <- match.arg(round_type) + + values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str, output) { + fun(y, na_str = na_str, round_type = round_type) + }) + + regmatches(x = str, m = positions)[[1]] <- values + return(str) } - - round_type <- match.arg(round_type) - - values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str, output) - fun(y, na_str = na_str, round_type = round_type)) - - regmatches(x = str, m = positions)[[1]] <- values - return(str) - } $range_with_cens_info @@ -50,7 +50,7 @@ ) checkmate::assert_true(all(x[c(3, 4)] %in% c(0, 1))) - res <- vapply(x[c(1, 2)], FUN = function(x){ + res <- vapply(x[c(1, 2)], FUN = function(x) { format_value(x, format_xx, round_type = round_type) }, character(1)) if (x[3] == 1) res[1] <- paste0(res[1], "+") From 9dd56bd71dd22dbe6fa0f65eb464c9a5d312b510 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Fri, 7 Nov 2025 16:58:09 +0000 Subject: [PATCH 13/49] apply review suggestions --- R/jjcsformats.R | 76 ++++++++++++++++++------------------------------- 1 file changed, 27 insertions(+), 49 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 80f32350..953e0a99 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -58,10 +58,10 @@ jjcsformat_xx <- function( ) x_positions <- regmatches(x = str, m = positions)[[1]] - roundings <- lapply(X = x_positions, function(fmt) { - rounding <- function(x, - na_str, - round_type) { + single_rounding <- function(fmt) { + function(x, + na_str, + round_type) { if (fmt %in% formatters:::formats_1d) { res <- format_value(x, fmt, @@ -75,7 +75,10 @@ jjcsformat_xx <- function( } res } - return(rounding) + } + + roundings <- lapply(X = x_positions, function(fmt) { + single_rounding(fmt) }) rtable_format <- @@ -140,7 +143,7 @@ jjcsformat_cnt_den_fract_fct <- function(d = 1, round_type = c("sas", "iec"), output, ...) 
{ - attr(x, "label") <- NULL + obj_label(x) <- NULL if (any(is.na(x))) { return("-") } @@ -172,7 +175,7 @@ jjcsformat_cnt_den_fract_fct <- function(d = 1, fmt_cd <- paste0(count, "/", denom) } - if (verbose) print(paste0("round_type used: ", round_type)) + if (verbose) message(paste0("round_type used: ", round_type)) fmtpct <- format_value(100 * fraction, format = paste0("xx.", strrep("x", times = d)), @@ -180,50 +183,25 @@ jjcsformat_cnt_den_fract_fct <- function(d = 1, round_type = round_type ) + fmtpct_p2 <- fmtpct + # deal with special cases + if (fraction == 1) { + fmtpct_p2 <- "100.0" + } else if (fmtpct == format(100, nsmall = d)) { + fmtpct_p2 <- paste0(">", 100 - 10**(-d)) + } else if (count != 0 && fmtpct == format(0, nsmall = d)) { + fmtpct_p2 <- paste0("<", 10**(-d)) + } - if (type %in% c("count_fraction", "count_denom_fraction")) { - result <- if (count == 0) { - if (type == "count_fraction") { - "0" - } else { - paste0(fmt_cd, " (", fmtpct, "%)") - } - } else if (fraction == 1) { - ## per conventions still report as 100.0% - paste0(fmt_cd, " (100.0%)") - } else if (fmtpct == format(0, nsmall = d)) { - # else if (100*x[2] < 10**(-d)) { - ### example pct = 0.09999 ### <0.1% (even if fmtpct == 0.1, - # but the actual value of pct <0.1) - paste0(fmt_cd, " (<", 10**(-d), "%)") - } else if (fmtpct == format(100, nsmall = d)) { - # else if (100*x[2] > 100-10**(-d)) { - ### example pct = 99.90001 ### >99.9% (even if fmtpct == 99.9, - # but the actual value of pct >99.9) - paste0(fmt_cd, " (>", 100 - 10**(-d), "%)") - } else { - paste0(fmt_cd, " (", fmtpct, "%)") - } + fmtpct_p2 <- paste0(fmtpct_p2, "%") + fmtpct_p <- paste0(" (", fmtpct_p2, ")") + + result <- if (type == "fraction_count_denom") { + paste0(fmtpct_p2, " (", fmt_cd, ")") + } else if (count == 0 && type == "count_fraction") { + 0 } else { - # type == fraction_count_denom - result <- if (count == 0) { - "0" - } else if (fraction == 1) { - ## per conventions still report as 100.0% - paste0("100.0% 
(", fmt_cd, ")") - } else if (fmtpct == format(0, nsmall = d)) { - # else if (100*x[2] < 10**(-d)) { - ### example pct = 0.09999 ### <0.1% (even if fmtpct == 0.1, - # but the actual value of pct <0.1) - paste0("<", 10**(-d), "%) (", fmt_cd, ")") - } else if (fmtpct == format(100, nsmall = d)) { - # else if (100*x[2] > 100-10**(-d)) { - ### example pct = 99.90001 ### >99.9% (even if fmtpct == 99.9, - # but the actual value of pct >99.9) - paste0(">", 100 - 10**(-d), "% (", fmt_cd, ")") - } else { - paste0(fmtpct, "% (", fmt_cd, ")") - } + paste0(fmt_cd, fmtpct_p) } return(result) From 5b59db585bb406e9a9c81ec5ed25a8e28655fad9 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 09:22:13 +0000 Subject: [PATCH 14/49] [skip style] [skip vbump] Restyle files --- R/jjcsformats.R | 12 ++-- R/tt_to_tblfile.R | 86 +++++++++++++------------ tests/testthat/test-count_pct_relrisk.R | 13 ++-- 3 files changed, 55 insertions(+), 56 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 953e0a99..7ac534d1 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -37,10 +37,11 @@ #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) #' fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) jjcsformat_xx <- function( - str, - na_str = na_str_dflt, - na_str_dflt = "NE", - replace_na_dflt = TRUE) { + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE +) { if (grepl("xxx.", str, fixed = TRUE)) { stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. 
instead.") } @@ -113,8 +114,6 @@ jjcsformat_xx <- function( } - - #' @name count and fraction related formatting functions #' @title Formatting functions for count and fraction, and for count denominator and fraction values #' @@ -242,7 +241,6 @@ jjcsformat_count_denom_fraction <- jjcsformat_cnt_den_fract_fct(type = "count_de jjcsformat_fraction_count_denom <- jjcsformat_cnt_den_fract_fct(type = "fraction_count_denom") - #' @title Function factory for p-value formatting #' #' @description A function factory to generate formatting functions for p-value diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 7f6cf62b..3f6d7c26 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -7,10 +7,11 @@ #' @return `tt` represented as a `tbl` data.frame suitable for passing #' to [tidytlg::gentlg] via the `huxme` argument. tt_to_tbldf <- function( - tt, - fontspec = font_spec("Times", 9L, 1), - string_map = default_str_map, - markup_df = dps_markup_df) { + tt, + fontspec = font_spec("Times", 9L, 1), + string_map = default_str_map, + markup_df = dps_markup_df +) { if (!validate_table_struct(tt)) { stop( "invalid table structure. summarize_row_groups without ", @@ -108,9 +109,10 @@ tlg_type <- function(tt) { } mpf_to_colspan <- function( - mpf, - string_map = default_str_map, - markup_df = dps_markup_df) { + mpf, + string_map = default_str_map, + markup_df = dps_markup_df +) { if (!methods::is(mpf, "MatrixPrintForm")) { stop("figure out how to make this an mpf (MatrixPrintForm) first.") } @@ -271,38 +273,39 @@ get_ncol <- function(tt) { #' @return If `file` is non-NULL, this is called for the side-effect of writing #' one or more RTF files. Otherwise, returns a list of `huxtable` objects. 
tt_to_tlgrtf <- function( + tt, + file = NULL, + orientation = c("portrait", "landscape"), + colwidths = def_colwidths( tt, - file = NULL, - orientation = c("portrait", "landscape"), - colwidths = def_colwidths( - tt, - fontspec, - col_gap = col_gap, - label_width_ins = label_width_ins, - type = tlgtype - ), - label_width_ins = 2, - watermark = NULL, - pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), - fontspec = font_spec("Times", 9L, 1.2), - pg_width = pg_width_by_orient(orientation == "landscape"), - margins = c(0, 0, 0, 0), - paginate = tlg_type(tt) == "Table", - col_gap = ifelse(tlgtype == "Listing", .5, 3), - nosplitin = list( - row = character(), - col = character() - ), - verbose = FALSE, - tlgtype = tlg_type(tt), - string_map = default_str_map, - markup_df = dps_markup_df, - combined_rtf = FALSE, - one_table = TRUE, - border_mat = make_header_bordmat(obj = tt), - round_type = NULL, - validate = TRUE, - ...) { + fontspec, + col_gap = col_gap, + label_width_ins = label_width_ins, + type = tlgtype + ), + label_width_ins = 2, + watermark = NULL, + pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), + fontspec = font_spec("Times", 9L, 1.2), + pg_width = pg_width_by_orient(orientation == "landscape"), + margins = c(0, 0, 0, 0), + paginate = tlg_type(tt) == "Table", + col_gap = ifelse(tlgtype == "Listing", .5, 3), + nosplitin = list( + row = character(), + col = character() + ), + verbose = FALSE, + tlgtype = tlg_type(tt), + string_map = default_str_map, + markup_df = dps_markup_df, + combined_rtf = FALSE, + one_table = TRUE, + border_mat = make_header_bordmat(obj = tt), + round_type = NULL, + validate = TRUE, + ... 
+) { # Validate table structure if requested and not disabled by environment variable # nolint start if (validate && tlgtype == "Table" && methods::is(tt, "VTableTree") && @@ -321,7 +324,7 @@ tt_to_tlgrtf <- function( on.exit(close_font_dev()) } - if (is.null(round_type) && tlgtype == "Table"){ + if (is.null(round_type) && tlgtype == "Table") { round_type <- round_type(tt) } if (tlgtype == "Listing" && nrow(tt) == 0) { @@ -708,8 +711,9 @@ fixup_bord_mat <- function(brdmat, hstrs) { } .make_header_bordmat <- function( - obj, - mpf = matrix_form(utils::head(obj, 1), expand_newlines = FALSE)) { + obj, + mpf = matrix_form(utils::head(obj, 1), expand_newlines = FALSE) +) { spns <- mf_spans(mpf) nlh <- mf_nlheader(mpf) nrh <- mf_nrheader(mpf) diff --git a/tests/testthat/test-count_pct_relrisk.R b/tests/testthat/test-count_pct_relrisk.R index 04178c1a..dd04400b 100644 --- a/tests/testthat/test-count_pct_relrisk.R +++ b/tests/testthat/test-count_pct_relrisk.R @@ -57,10 +57,11 @@ wald_diff <- function(inputs) { } count_unique_subjects <- function( - df, - id = "USUBJID", - sub_set = NULL, - var = NULL) { + df, + id = "USUBJID", + sub_set = NULL, + var = NULL +) { if (!is.null(sub_set)) { df <- subset(df, sub_set) } @@ -237,10 +238,6 @@ test_that("a_freq_j with val = NA and denom option", { }) - - - - test_that("a_freq_j with risk difference method cmh", { adsl_col <- adsl[adsl$ARM == "A: Drug X", ] adae_col <- adae[adae$ARM == "A: Drug X", ] From 3120443852fc9c9f0c636906d1fb6525f884ed48 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 09:23:36 +0000 Subject: [PATCH 15/49] [skip roxygen] [skip vbump] Roxygen Man Pages Auto Update --- man/tt_to_tlgrtf.Rd | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/man/tt_to_tlgrtf.Rd b/man/tt_to_tlgrtf.Rd index 2f6d922d..61ee195f 100644 --- a/man/tt_to_tlgrtf.Rd +++ b/man/tt_to_tlgrtf.Rd @@ -26,8 +26,8 @@ tt_to_tlgrtf( combined_rtf = 
FALSE, one_table = TRUE, border_mat = make_header_bordmat(obj = tt), - validate = TRUE, round_type = NULL, + validate = TRUE, ... ) } @@ -84,15 +84,15 @@ and k is the number of lines the header takes up. See \link[tidytlg:add_bottom_b for what the matrix should contain. Users should only specify this when the default behavior does not meet their needs.} -\item{validate}{logical(1). Whether to validate the table structure using -\code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. This can also be disabled -globally by setting the environment variable \code{JUNCO_DISABLE_VALIDATION=TRUE}.} - \item{round_type}{(\code{"iec"} or \code{"sas"})\cr the type of rounding to perform. iec, the default, peforms rounding compliant with IEC 60559, while sas performs nearest-value rounding consistent with rounding within SAS. See \code{?formatters::format_value} for more details.} +\item{validate}{logical(1). Whether to validate the table structure using +\code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. 
This can also be disabled +globally by setting the environment variable \code{JUNCO_DISABLE_VALIDATION=TRUE}.} + \item{...}{Additional arguments passed to gentlg} } \value{ From a425c770fd81e2456f36e2c7e0288fa06df402fb Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 12:47:09 +0000 Subject: [PATCH 16/49] update tests for round_type --- tests/testthat/_snaps/a_freq_j.md | 24 ++++++ ...unco_utils_default_stats_formats_labels.md | 2 +- .../_snaps/tt_to_tblfile/test3allparts.rtf | 0 tests/testthat/sas_comparison/relative_risk.R | 4 +- tests/testthat/test-a_freq_j.R | 2 + tests/testthat/test-a_freq_resp_var_j.R | 28 +++---- tests/testthat/test-a_maxlev.R | 2 +- .../testthat/test-a_summarize_aval_chg_diff.R | 8 +- tests/testthat/test-analyze_values.R | 4 +- tests/testthat/test-blank_line.R | 8 +- tests/testthat/test-cmp_functions.R | 4 +- tests/testthat/test-colwidths.R | 4 +- tests/testthat/test-count_pct.R | 76 +++++++++---------- tests/testthat/test-count_pct_relrisk.R | 16 ++-- tests/testthat/test-coxph_hr.R | 8 +- tests/testthat/test-coxreg_multivar.R | 4 +- tests/testthat/test-event_free.R | 8 +- tests/testthat/test-get_ref_info.R | 16 ++-- tests/testthat/test-jjcs_num_formats.R | 7 +- tests/testthat/test-kaplan_meier.R | 8 +- tests/testthat/test-patyrs-eair100.R | 10 +-- tests/testthat/test-proportions.R | 4 +- tests/testthat/test-pruning_functions.R | 40 +++++----- tests/testthat/test-relative_risk.R | 4 +- tests/testthat/test-remove_col_count.R | 5 +- tests/testthat/test-resp01_functions.R | 8 +- tests/testthat/test-response_by_var.R | 12 +-- tests/testthat/test-sorting_functions.R | 8 +- tests/testthat/test-split_functions.R | 29 +++---- tests/testthat/test-summarize_ancova.R | 4 +- 30 files changed, 195 insertions(+), 162 deletions(-) mode change 100644 => 100755 tests/testthat/_snaps/tt_to_tblfile/test3allparts.rtf diff --git a/tests/testthat/_snaps/a_freq_j.md b/tests/testthat/_snaps/a_freq_j.md index cde591da..f0eaa087 100644 --- 
a/tests/testthat/_snaps/a_freq_j.md +++ b/tests/testthat/_snaps/a_freq_j.md @@ -1,3 +1,27 @@ +# a_freq_j with label_map works in a table layout as expected + + Code + result + Output + A B + ——————————————————————————————————————————— + No Response 30/50 (60.0%) 28/50 (56.0%) + Response 20/50 (40.0%) 22/50 (44.0%) + +# a_freq_j with label_map restricts the values according to row split and label_map + + Code + result + Output + A B + —————————————————————————————————————————— + Baseline + Response A 6/50 (12.0%) 4/50 (8.0%) + Response B 6/50 (12.0%) 4/50 (8.0%) + Week 1 + Response C 3/50 (6.0%) 7/50 (14.0%) + Response D 9/50 (18.0%) 7/50 (14.0%) + # a_freq_j_with_exclude allows to exclude row split levels from the analysis Code diff --git a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md index e58135ab..1b669701 100644 --- a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md +++ b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md @@ -40,7 +40,7 @@ $range_with_cens_info - function(x, output, round_type = c("iec", "sas"), ...) { + function(x, output, round_type = valid_round_type, ...) 
{ round_type <- match.arg(round_type) checkmate::assert_numeric( x, diff --git a/tests/testthat/_snaps/tt_to_tblfile/test3allparts.rtf b/tests/testthat/_snaps/tt_to_tblfile/test3allparts.rtf old mode 100644 new mode 100755 diff --git a/tests/testthat/sas_comparison/relative_risk.R b/tests/testthat/sas_comparison/relative_risk.R index 56e4643f..35be3446 100644 --- a/tests/testthat/sas_comparison/relative_risk.R +++ b/tests/testthat/sas_comparison/relative_risk.R @@ -261,7 +261,7 @@ dat <- data.frame( )) ) -lyt <- basic_table() |> +lyt <- basic_table(round_type = "sas") |> split_cols_by("Treatment", ref_group = "B") |> analyze( vars = "Response", @@ -292,4 +292,4 @@ lyt <- basic_table() |> ) ) -build_table(lyt, dat, round_type = "sas") +build_table(lyt, dat) diff --git a/tests/testthat/test-a_freq_j.R b/tests/testthat/test-a_freq_j.R index 95054b43..bbd764cd 100644 --- a/tests/testthat/test-a_freq_j.R +++ b/tests/testthat/test-a_freq_j.R @@ -20,6 +20,7 @@ test_that("a_freq_j with label_map works in a table layout as expected", { ) ) result <- build_table(lyt, dta) + expect_snapshot(result) }) test_that("a_freq_j with label_map restricts the values according to row split and label_map", { @@ -47,6 +48,7 @@ test_that("a_freq_j with label_map restricts the values according to row split a ) ) result <- build_table(lyt, dta) + expect_snapshot(result) }) test_that("a_freq_j_with_exclude allows to exclude row split levels from the analysis", { diff --git a/tests/testthat/test-a_freq_resp_var_j.R b/tests/testthat/test-a_freq_resp_var_j.R index 36e664fb..c3fa105f 100644 --- a/tests/testthat/test-a_freq_resp_var_j.R +++ b/tests/testthat/test-a_freq_resp_var_j.R @@ -16,7 +16,7 @@ test_that("a_freq_resp_var_j works as expected with basic usage", { ) # Create the layout based on patterns seen in test-varia.R - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( "SEX", @@ -28,7 +28,7 @@ 
test_that("a_freq_resp_var_j works as expected with basic usage", { ) # Build the table - tbl <- build_table(lyt, adrs, round_type = "sas") + tbl <- build_table(lyt, adrs) expect_true(!is.null(tbl)) # Extract and check one cell for basic validation @@ -54,7 +54,7 @@ test_that("a_freq_resp_var_j works with factor responses", { ) # Create the layout - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( "SEX", @@ -66,7 +66,7 @@ test_that("a_freq_resp_var_j works with factor responses", { ) # Should not throw an error - expect_no_error(build_table(lyt, adrs, round_type = "sas")) + expect_no_error(build_table(lyt, adrs)) }) test_that("a_freq_resp_var_j handles missing values correctly", { @@ -86,7 +86,7 @@ test_that("a_freq_resp_var_j handles missing values correctly", { adrs$SEX[6:10] <- NA # Create the layout - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( "SEX", @@ -98,7 +98,7 @@ test_that("a_freq_resp_var_j handles missing values correctly", { ) # Should not throw an error - expect_no_error(suppressWarnings(build_table(lyt, adrs, round_type = "sas"))) + expect_no_error(suppressWarnings(build_table(lyt, adrs))) }) test_that("a_freq_resp_var_j errors on invalid responses", { @@ -117,7 +117,7 @@ test_that("a_freq_resp_var_j errors on invalid responses", { adrs$RSP[1:3] <- "MAYBE" # Create the layout - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( "SEX", @@ -130,7 +130,7 @@ test_that("a_freq_resp_var_j errors on invalid responses", { # Should throw an error about invalid response values expect_error( - build_table(lyt, adrs, round_type = "sas"), + build_table(lyt, adrs), "resp_var must contain only Y/N values" ) }) @@ -148,7 +148,7 @@ test_that("a_freq_resp_var_j errors when 
resp_var is null", { ) # Create layout with missing resp_var - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( "SEX", @@ -157,7 +157,7 @@ test_that("a_freq_resp_var_j errors when resp_var is null", { # Should throw an error about missing resp_var expect_error( - build_table(lyt, adrs, round_type = "sas"), + build_table(lyt, adrs), "resp_var cannot be NULL." ) }) @@ -178,7 +178,7 @@ test_that("a_freq_resp_var_j works with drop_levels parameter", { ) # Create layout with drop_levels = TRUE - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( "SEX", @@ -191,7 +191,7 @@ test_that("a_freq_resp_var_j works with drop_levels parameter", { ) # Should not throw an error - expect_no_error(build_table(lyt, adrs, round_type = "sas")) + expect_no_error(build_table(lyt, adrs)) }) test_that("a_freq_resp_var_j works with riskdiff parameter", { @@ -210,7 +210,7 @@ test_that("a_freq_resp_var_j works with riskdiff parameter", { ) # Create layout with drop_levels = TRUE - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( "SEX", @@ -223,5 +223,5 @@ test_that("a_freq_resp_var_j works with riskdiff parameter", { ) # Should not throw an error - expect_no_error(build_table(lyt, adrs, round_type = "sas")) + expect_no_error(build_table(lyt, adrs)) }) diff --git a/tests/testthat/test-a_maxlev.R b/tests/testthat/test-a_maxlev.R index af077e26..ab9ee8ab 100644 --- a/tests/testthat/test-a_maxlev.R +++ b/tests/testthat/test-a_maxlev.R @@ -16,7 +16,7 @@ test_that("a_maxlev produces correct numbers for single treatment per subject", AESEV := ordered(AESEV, levels = c("Missing", "Mild", "Moderate", "Severe")) ) - lyt <- basic_table() |> + lyt <- basic_table(round_type = "sas") |> split_cols_by("ARM") |> 
add_overall_col("Total") |> split_rows_by("AESEV", split_fun = aesevall_spf) |> diff --git a/tests/testthat/test-a_summarize_aval_chg_diff.R b/tests/testthat/test-a_summarize_aval_chg_diff.R index 3ad48ca4..062685d3 100644 --- a/tests/testthat/test-a_summarize_aval_chg_diff.R +++ b/tests/testthat/test-a_summarize_aval_chg_diff.R @@ -94,7 +94,7 @@ test_that("a_summarize_aval_chg_diff_j works as expected", { ref_path <- c("colspan_trt", " ", "TRT01A", "Placebo") - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% ### first columns split_cols_by( "colspan_trt", @@ -142,7 +142,7 @@ test_that("a_summarize_aval_chg_diff_j works as expected", { ) # Test that the table builds without errors - result <- expect_no_error(build_table(lyt, ADEG, round_type = "sas")) + result <- expect_no_error(build_table(lyt, ADEG)) # Check that the result is a valid rtable expect_s4_class(result, "TableTree") @@ -230,7 +230,7 @@ test_that("a_summarize_aval_chg_diff_j works with ancova = TRUE", { ) ref_path <- c("colspan_trt", " ", "TRT01A", "Placebo") - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% ### first columns split_cols_by( "colspan_trt", @@ -279,7 +279,7 @@ test_that("a_summarize_aval_chg_diff_j works with ancova = TRUE", { ) # Test that the table builds without errors - result <- expect_no_error(build_table(lyt, ADEG, round_type = "sas")) + result <- expect_no_error(build_table(lyt, ADEG)) # Check that the result is a valid rtable expect_s4_class(result, "TableTree") diff --git a/tests/testthat/test-analyze_values.R b/tests/testthat/test-analyze_values.R index 2e0bd0ba..1027158e 100644 --- a/tests/testthat/test-analyze_values.R +++ b/tests/testthat/test-analyze_values.R @@ -2,7 +2,7 @@ library(rtables) test_that("analyze_values correctly modifies layout with proper formats", { # Create a simple layout - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_cols_by("ARM") # Define variables to analyze @@ -33,7 +33,7 @@ 
test_that("analyze_values correctly modifies layout with proper formats", { ) # Build the table - tbl <- build_table(modified_lyt, mock_data, round_type = "sas") + tbl <- build_table(modified_lyt, mock_data) # Check that the table has the expected structure expect_true("AGE" %in% row.names(tbl)) diff --git a/tests/testthat/test-blank_line.R b/tests/testthat/test-blank_line.R index f84d3ece..cfa64847 100644 --- a/tests/testthat/test-blank_line.R +++ b/tests/testthat/test-blank_line.R @@ -1,7 +1,7 @@ test_that("insert_blank_line works as expected", { ADSL <- ex_adsl - lyt <- basic_table() |> + lyt <- basic_table(round_type = "sas") |> split_cols_by("ARM") |> split_rows_by("STRATA1") |> analyze(vars = "AGE", afun = function(x) { @@ -23,7 +23,7 @@ test_that("insert_blank_line works as expected", { }) # We don't want to see any warning about duplicate table names here. - tbl <- expect_silent(build_table(lyt, ADSL, round_type = "sas")) + tbl <- expect_silent(build_table(lyt, ADSL)) tbl # We expect 6 blank lines: after mean and range, for each of the @@ -34,7 +34,7 @@ test_that("insert_blank_line works as expected", { test_that("insert_blank_line optionally uses custom table names", { ADSL <- ex_adsl - lyt <- basic_table() |> + lyt <- basic_table(round_type = "sas") |> split_cols_by("ARM") |> split_rows_by("STRATA1") |> analyze(vars = "AGE", afun = function(x) { @@ -56,7 +56,7 @@ test_that("insert_blank_line optionally uses custom table names", { }) # We don't want to see any warning about duplicate table names here. 
- tbl <- expect_silent(build_table(lyt, ADSL, round_type = "sas")) + tbl <- expect_silent(build_table(lyt, ADSL)) tbl # We expect 6 blank lines: after mean and range, for each of the diff --git a/tests/testthat/test-cmp_functions.R b/tests/testthat/test-cmp_functions.R index a3154302..6f5e6a5a 100644 --- a/tests/testthat/test-cmp_functions.R +++ b/tests/testthat/test-cmp_functions.R @@ -1,7 +1,7 @@ test_that("cmp_split_fun works as expected", { - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by("ID", split_fun = cmp_split_fun) |> - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) expect_snapshot(result) }) diff --git a/tests/testthat/test-colwidths.R b/tests/testthat/test-colwidths.R index 03644cec..2a922cf3 100644 --- a/tests/testthat/test-colwidths.R +++ b/tests/testthat/test-colwidths.R @@ -15,7 +15,7 @@ ADSL <- data.frame( PKFL = c("N", "N", "N", "N", "N", "N", "N", "N", "N", "N") ) -lyt <- basic_table() |> +lyt <- basic_table(round_type = "sas") |> split_cols_by("TRT01P") |> add_overall_col("Total") |> analyze("FASFL", @@ -37,7 +37,7 @@ lyt <- basic_table() |> show_labels = "visible" ) -tt <- build_table(lyt, ADSL, round_type = "sas") +tt <- build_table(lyt, ADSL) test_that("ttype_wrap_vec works as expected", { result <- ttype_wrap_vec(vec = c(1, 2, 3, 4) %>% as.character(), fontspec = fontspec, width = 2) diff --git a/tests/testthat/test-count_pct.R b/tests/testthat/test-count_pct.R index 58a47e4b..3d684af1 100644 --- a/tests/testthat/test-count_pct.R +++ b/tests/testthat/test-count_pct.R @@ -12,7 +12,7 @@ test_that("a_freq_j with val = NA and denom option", { extra_args_1 <- list( .stats = c("count_unique_denom_fraction") ) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "COUNTRY", @@ -21,11 +21,11 @@ test_that("a_freq_j with val = NA and denom option", { ) # apply to adsl - tbl1 
<- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1b) # scenario 1c : denom = .N_col, all values @@ -33,7 +33,7 @@ test_that("a_freq_j with val = NA and denom option", { denom = "N_col", .stats = c("count_unique_denom_fraction") ) - lyt1c <- basic_table(show_colcounts = TRUE) %>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "COUNTRY", @@ -41,7 +41,7 @@ test_that("a_freq_j with val = NA and denom option", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") + tbl1c <- build_table(lyt1c, adae, adsl) expect_snapshot(tbl1c) }) @@ -51,7 +51,7 @@ test_that("a_freq_j with specific val (CHN) and denom option", { .stats = c("count_unique_denom_fraction"), val = "CHN" ) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "COUNTRY", @@ -60,11 +60,11 @@ test_that("a_freq_j with specific val (CHN) and denom option", { ) # apply to adsl - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1b) # scenario 1c : denom = .N_col, all values @@ -73,7 +73,7 @@ test_that("a_freq_j with specific val (CHN) and denom option", { .stats = c("count_unique_denom_fraction"), val = "CHN" ) - lyt1c <- basic_table(show_colcounts = TRUE) %>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "COUNTRY", @@ -81,7 +81,7 @@ test_that("a_freq_j with specific val (CHN) and denom option", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl, 
round_type = "sas") + tbl1c <- build_table(lyt1c, adae, adsl) expect_snapshot(tbl1c) }) @@ -91,7 +91,7 @@ test_that("a_freq_j with N_only", { extra_args_1 <- list( .stats = c("count_unique") ) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "COUNTRY", @@ -100,11 +100,11 @@ test_that("a_freq_j with N_only", { ) # apply to adsl - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1b) }) @@ -115,17 +115,17 @@ test_that("a_freq_j with TotCol_only", { .stats = c("count_unique"), restr_columns = "Total" ) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% add_overall_col("Total") %>% analyze(vars = "COUNTRY", afun = a_freq_j, extra_args = extra_args_1) # apply to adsl - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1b) }) @@ -142,7 +142,7 @@ test_that("a_freq_j as cfun", { denom = "N_col", .stats = c("count_unique_denom_fraction") ) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% split_rows_by("AEBODSYS") %>% summarize_row_groups( @@ -157,13 +157,13 @@ test_that("a_freq_j as cfun", { ) # apply to adae - tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1) # scenario 2 : label using label_fstr method works extra_args_2 <- append(extra_args_1, list(label_fstr = "Bodysystem %s")) - lyt2 <- basic_table(show_colcounts = TRUE) %>% + 
lyt2 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% split_rows_by("AEBODSYS") %>% summarize_row_groups( @@ -178,7 +178,7 @@ test_that("a_freq_j as cfun", { ) # apply to adae - tbl2 <- build_table(lyt2, adae, adsl, round_type = "sas") + tbl2 <- build_table(lyt2, adae, adsl) expect_snapshot(tbl2) }) @@ -198,7 +198,7 @@ test_that("a_freq_j with label map", { # scenario 1 : with label_map extra_args_1 <- list(label_map = Subjs_with_AEs) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "TRTEMFL", @@ -207,12 +207,12 @@ test_that("a_freq_j with label map", { ) # apply to adae - tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1) # scenario 2 : set row label using label parameter extra_args_2 <- list(label = "Subjects with >= 1 AE") - lyt2 <- basic_table(show_colcounts = TRUE) %>% + lyt2 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "TRTEMFL", @@ -220,7 +220,7 @@ test_that("a_freq_j with label map", { extra_args = extra_args_2 ) - tbl2 <- build_table(lyt2, adae, adsl, round_type = "sas") + tbl2 <- build_table(lyt2, adae, adsl) expect_snapshot(tbl2) }) @@ -230,7 +230,7 @@ test_that("a_freq_j (old count_pats case)", { extra_args_1 <- list( .stats = c("count_unique_denom_fraction") ) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "COUNTRY", @@ -239,11 +239,11 @@ test_that("a_freq_j (old count_pats case)", { ) # apply to adsl - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) expect_snapshot(tbl1) # apply to adae - tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1b) # scenario 1c : denom = 
.N_col, all values @@ -251,7 +251,7 @@ test_that("a_freq_j (old count_pats case)", { denom = "N_col", .stats = c("count_unique_denom_fraction") ) - lyt1c <- basic_table(show_colcounts = TRUE) %>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "COUNTRY", @@ -259,7 +259,7 @@ test_that("a_freq_j (old count_pats case)", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") + tbl1c <- build_table(lyt1c, adae, adsl) expect_snapshot(tbl1c) }) @@ -268,7 +268,7 @@ test_that("a_freq_j with N_subgroup as denom", { # scenario 1: denom = N_subgroup, all values extra_args_1 <- list(denom = "n_df") - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% split_rows_by("SEX") %>% analyze( @@ -278,12 +278,12 @@ test_that("a_freq_j with N_subgroup as denom", { ) # applied to adsl - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) expect_snapshot(tbl1) # applied to adae: when denomdf is not specified in the layout, # it will take Nsubgroup from df, not from alt_counts_df - tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1, adae, adsl) expect_snapshot(tbl1b) # applied to adae: with denomdf specified in layout @@ -294,7 +294,7 @@ test_that("a_freq_j with N_subgroup as denom", { .stats = c("count_unique_denom_fraction") ) - lyt1c <- basic_table(show_colcounts = TRUE) %>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% split_rows_by("SEX") %>% analyze( @@ -303,7 +303,7 @@ test_that("a_freq_j with N_subgroup as denom", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") + tbl1c <- build_table(lyt1c, adae, adsl) expect_snapshot(tbl1c) }) @@ -417,7 +417,7 @@ test_that("a_freq_j with N_trt as denom - special situation", { colgroup = "ARM", riskdiff 
= FALSE ) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_cols_by(trtvar, show_colcounts = TRUE) %>% split_cols_by("spanheader", split_fun = trim_levels_in_group("ASEV")) %>% split_cols_by("ASEV", show_colcounts = TRUE) %>% @@ -435,7 +435,7 @@ test_that("a_freq_j with N_trt as denom - special situation", { ) ## main focus of this test is on the denominator - tbl <- build_table(lyt, adaeall, alt_counts_df = adsl_, round_type = "sas") + tbl <- build_table(lyt, adaeall, alt_counts_df = adsl_) expect_snapshot(tbl) ## additionally check if the denominator values are as expected @@ -456,12 +456,12 @@ test_that("a_freq_j with keep_levels (CHN, NGA) ", { denom = "N_col", val = c("CHN", "NGA") ) - lyt1 <- basic_table(show_colcounts = TRUE) %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze(vars = "COUNTRY", afun = a_freq_j, extra_args = extra_args_1) # apply to adsl - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) expect_snapshot(tbl1) # Also keep the original test to verify the specific row names diff --git a/tests/testthat/test-count_pct_relrisk.R b/tests/testthat/test-count_pct_relrisk.R index dd04400b..fbdaed5d 100644 --- a/tests/testthat/test-count_pct_relrisk.R +++ b/tests/testthat/test-count_pct_relrisk.R @@ -21,7 +21,7 @@ adsl$rrisk_label <- paste(adsl[["ARM"]], "vs Placebo") adae <- left_join(adae, adsl, by = join_by(USUBJID, ARM)) -core_lyt <- basic_table(show_colcounts = TRUE) %>% +core_lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM") %>% split_cols_by("rrisk_header", nested = FALSE) %>% @@ -104,7 +104,7 @@ test_that("a_freq_j with val = NA and denom option", { ) # apply to adsl - here it is not yet critical to set parameter denom - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) res1 <- 
cell_values(tbl1["CHN", "A: Drug X"]) res1_val <- unlist(unname(res1[[DrugX_column_val]])) res1_rr <- res1[[DrugX_column_rr]] %>% as.numeric() @@ -154,7 +154,7 @@ test_that("a_freq_j with val = NA and denom option", { afun = a_freq_j, extra_args = extra_args ) - tbl1b <- build_table(lyt1b, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1b, adae, adsl) res1b <- cell_values(tbl1b["CHN", "A: Drug X"]) res1b_val <- unlist(unname(res1b[[DrugX_column_val]])) res1b_rr <- res1b[[DrugX_column_rr]] %>% as.numeric() @@ -203,7 +203,7 @@ test_that("a_freq_j with val = NA and denom option", { afun = a_freq_j, extra_args = extra_args ) - tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") + tbl1c <- build_table(lyt1c, adae, adsl) res1c <- cell_values(tbl1c["CHN", "A: Drug X"]) res1c_val <- unlist(unname(res1c[[DrugX_column_val]])) @@ -268,7 +268,7 @@ test_that("a_freq_j with risk difference method cmh", { extra_args = extra_args ) - tbl1d <- build_table(lyt1d, adae, adsl, round_type = "sas") + tbl1d <- build_table(lyt1d, adae, adsl) res1d <- cell_values(tbl1d["CHN", "A: Drug X"]) res1d_val <- unlist(unname(res1d[[DrugX_column_val]])) @@ -370,7 +370,7 @@ test_that("a_freq_j with N_subgroup as denom", { ) # applied to adsl - tbl1 <- build_table(lyt1, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adsl) tbl1x <- tbl1[ c("SEX", "F", "COUNTRY", "count_unique_denom_fraction.CHN"), seq_len(ncol(tbl1)) @@ -418,7 +418,7 @@ test_that("a_freq_j with N_subgroup as denom", { # applied to adae: HERE, when denomdf is not specified in the layout, # it will take Nsubgroup from df, not from alt_counts_df - tbl1b <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1b <- build_table(lyt1, adae, adsl) tbl1bx <- tbl1b[ c("SEX", "F", "COUNTRY", "count_unique_denom_fraction.CHN"), seq_len(ncol(tbl1b)) @@ -487,7 +487,7 @@ test_that("a_freq_j with N_subgroup as denom", { extra_args = extra_args_2 ) - tbl1c <- build_table(lyt1c, adae, adsl, round_type = "sas") + tbl1c <- 
build_table(lyt1c, adae, adsl) tbl1cx <- tbl1c[ c("SEX", "F", "COUNTRY", "count_unique_denom_fraction.CHN"), seq_len(ncol(tbl1c)) diff --git a/tests/testthat/test-coxph_hr.R b/tests/testthat/test-coxph_hr.R index a2a233c0..65423463 100644 --- a/tests/testthat/test-coxph_hr.R +++ b/tests/testthat/test-coxph_hr.R @@ -50,7 +50,7 @@ test_that("a_coxph_hr works with custom arguments and stratification factors", { dplyr::filter(PARAMCD == "OS") %>% dplyr::mutate(is_event = CNSR == 0) - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by(var = "ARMCD") %>% analyze( vars = "AVAL", @@ -70,7 +70,7 @@ test_that("a_coxph_hr works with custom arguments and stratification factors", { .stats = c("hr_ci_3d", "pvalue") ) ) %>% - build_table(df = adtte_f, round_type = "sas") + build_table(df = adtte_f) res <- expect_silent(result) expect_snapshot(res) @@ -81,7 +81,7 @@ test_that("a_coxph_hr works with stratification factors for Log-Rank test", { dplyr::filter(PARAMCD == "OS") %>% dplyr::mutate(is_event = CNSR == 0) - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by(var = "ARMCD") %>% analyze( vars = "AVAL", @@ -101,7 +101,7 @@ test_that("a_coxph_hr works with stratification factors for Log-Rank test", { .stats = c("hr_ci_3d", "pvalue") ) ) %>% - build_table(df = adtte_f, round_type = "sas") + build_table(df = adtte_f) res <- expect_silent(result) expect_snapshot(res) diff --git a/tests/testthat/test-coxreg_multivar.R b/tests/testthat/test-coxreg_multivar.R index d553b117..492ca9e9 100644 --- a/tests/testthat/test-coxreg_multivar.R +++ b/tests/testthat/test-coxreg_multivar.R @@ -100,7 +100,7 @@ test_that("summarize_coxreg_multivar works as expected with custom options", { arm = "ARM", covariates = c("SEX", "AGE") ) - lyt <- basic_table() |> + lyt <- basic_table(round_type = "sas") |> summarize_coxreg_multivar( var = "STUDYID", variables = variables, @@ -116,7 +116,7 @@ test_that("summarize_coxreg_multivar 
works as expected with custom options", { pval = jjcsformat_pval_fct(0.1) ) ) - result <- expect_silent(build_table(lyt, anl, round_type = "sas")) + result <- expect_silent(build_table(lyt, anl)) expect_snapshot(result) }) diff --git a/tests/testthat/test-event_free.R b/tests/testthat/test-event_free.R index 28dc561a..ed91897d 100644 --- a/tests/testthat/test-event_free.R +++ b/tests/testthat/test-event_free.R @@ -47,7 +47,7 @@ test_that("a_event_free works with default arguments in a table layout", { is_event = CNSR == 0 ) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_cols_by(var = "ARMCD") for (time_point in c(3, 4, 5)) { lyt <- lyt |> @@ -65,7 +65,7 @@ test_that("a_event_free works with default arguments in a table layout", { ) ) } - result <- build_table(lyt, df = adtte_f, round_type = "sas") + result <- build_table(lyt, df = adtte_f) res <- expect_silent(result) expect_snapshot(res) @@ -79,7 +79,7 @@ test_that("a_event_free works with customized arguments in a table layout", { is_event = CNSR == 0 ) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_cols_by(var = "ARMCD") for (time_point in c(5, 1, 7)) { lyt <- lyt |> @@ -103,7 +103,7 @@ test_that("a_event_free works with customized arguments in a table layout", { ) ) } - result <- build_table(lyt, df = adtte_f, round_type = "sas") + result <- build_table(lyt, df = adtte_f) res <- expect_silent(result) expect_snapshot(res) diff --git a/tests/testthat/test-get_ref_info.R b/tests/testthat/test-get_ref_info.R index a7bbd602..a53cee44 100644 --- a/tests/testthat/test-get_ref_info.R +++ b/tests/testthat/test-get_ref_info.R @@ -47,7 +47,7 @@ test_that("get_ref_info works with a df analysis function", { # Define the global reference group. 
ref_path <- c("colspan_trt", " ", "ARM", "B: Placebo") - lyt <- basic_table() |> + lyt <- basic_table(round_type = "sas") |> split_cols_by( "colspan_trt", split_fun = trim_levels_to_map(map = colspan_trt_map) @@ -58,18 +58,18 @@ test_that("get_ref_info works with a df analysis function", { extra_args = list(ref_path = ref_path), afun = result_afun ) - result <- build_table(lyt, dm, round_type = "sas") + result <- build_table(lyt, dm) expect_snapshot(result) # Compare with non-hierarchical layout. - std_lyt <- basic_table() |> + std_lyt <- basic_table(round_type = "sas") |> split_cols_by("ARM", ref_group = "B: Placebo") |> analyze( "AGE", extra_args = list(ref_path = ref_path), afun = standard_afun ) - std_result <- build_table(std_lyt, dm, round_type = "sas") + std_result <- build_table(std_lyt, dm) expect_snapshot(std_result) }) @@ -109,7 +109,7 @@ test_that("get_ref_info works with a vector analysis function", { # Define the global reference group. ref_path <- c("colspan_trt", " ", "ARM", "B: Placebo") - lyt <- basic_table() |> + lyt <- basic_table(round_type = "sas") |> split_cols_by( "colspan_trt", split_fun = trim_levels_to_map(map = colspan_trt_map) @@ -120,18 +120,18 @@ test_that("get_ref_info works with a vector analysis function", { extra_args = list(ref_path = ref_path), afun = result_afun ) - result <- build_table(lyt, dm, round_type = "sas") + result <- build_table(lyt, dm) expect_snapshot(result) # Compare with non-hierarchical layout. 
- std_lyt <- basic_table() |> + std_lyt <- basic_table(round_type = "sas") |> split_cols_by("ARM", ref_group = "B: Placebo") |> analyze( c("AGE", "BMRKR1"), extra_args = list(ref_path = ref_path), afun = standard_afun ) - std_result <- build_table(std_lyt, dm, round_type = "sas") + std_result <- build_table(std_lyt, dm) expect_snapshot(std_result) # Keep one explicit check to verify the relationship between the two outputs diff --git a/tests/testthat/test-jjcs_num_formats.R b/tests/testthat/test-jjcs_num_formats.R index 80e14c49..0f7213a8 100644 --- a/tests/testthat/test-jjcs_num_formats.R +++ b/tests/testthat/test-jjcs_num_formats.R @@ -244,7 +244,12 @@ test_that("jjcsformats count_fraction works", { na_str = rep("NA", 1) ) format_value(rep(NA, 3), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), round_type = "sas", na_str = "NA") - format_value(c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), round_type = "sas", na_str = rep("NE", 10)) + format_value( + c(1, rep(NA, 2)), + format = jjcsformat_xx("xx.x (xx.x, xx.x)"), + round_type = "sas", + na_str = rep("NE", 10) + ) format_value( c(1, rep(NA, 2)), format = jjcsformat_xx("xx.x (xx.x, xx.x)"), diff --git a/tests/testthat/test-kaplan_meier.R b/tests/testthat/test-kaplan_meier.R index 6d77bfff..39d5d10e 100644 --- a/tests/testthat/test-kaplan_meier.R +++ b/tests/testthat/test-kaplan_meier.R @@ -95,7 +95,7 @@ test_that("a_kaplan_meier works inside analyze in table", { adtte_f$AVAL == max(adtte_f$AVAL[adtte_f$ARMCD == "ARM A"]) ] <- FALSE - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by( var = "ARMCD" ) %>% @@ -108,7 +108,7 @@ test_that("a_kaplan_meier works inside analyze in table", { is_event = "is_event" ) ) %>% - build_table(df = adtte_f, round_type = "sas") + build_table(df = adtte_f) res <- expect_silent(result) expect_snapshot(res) @@ -125,7 +125,7 @@ test_that("a_kaplan_meier works inside analyze in table with custom arguments", adtte_f$AVAL == 
max(adtte_f$AVAL[adtte_f$ARMCD == "ARM A"]) ] <- FALSE - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by( var = "ARMCD" ) %>% @@ -145,7 +145,7 @@ test_that("a_kaplan_meier works inside analyze in table with custom arguments", .indent_mods = c(median_ci_3d = 3L) ) ) %>% - build_table(df = adtte_f, round_type = "sas") + build_table(df = adtte_f) res <- expect_silent(result) expect_snapshot(res) diff --git a/tests/testthat/test-patyrs-eair100.R b/tests/testthat/test-patyrs-eair100.R index 258556fc..635edc4f 100644 --- a/tests/testthat/test-patyrs-eair100.R +++ b/tests/testthat/test-patyrs-eair100.R @@ -47,7 +47,7 @@ adsl$rrisk_label <- paste(adsl[["ARM"]], "vs Placebo") adae <- left_join(adsl, adae, by = "USUBJID") %>% mutate(ASTDY2 = ASTDY + 10) -core_lyt <- basic_table(show_colcounts = FALSE) %>% +core_lyt <- basic_table(show_colcounts = FALSE, round_type = "sas") %>% split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM") %>% split_cols_by("rrisk_header", nested = FALSE) %>% @@ -72,7 +72,7 @@ test_that("Check patient years numbers are giving expected result", { afun = a_patyrs_j, extra_args = extra_args ) - tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adae, adsl) res1 <- cell_values(tbl1[c("TRTDURY", "patyrs"), "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) @@ -105,7 +105,7 @@ test_that("Check aeir100 numbers are giving expected result", { ref_path = ref_path ) ) - tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adae, adsl) res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) @@ -158,7 +158,7 @@ test_that("Check aeir100 numbers are giving expected result when fup_var argumen ref_path = ref_path ) ) - tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adae, adsl) res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: 
Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) @@ -211,7 +211,7 @@ test_that("Check aeir100 numbers are giving expected result when occ_dy argument ref_path = ref_path ) ) - tbl1 <- build_table(lyt1, adae, adsl, round_type = "sas") + tbl1 <- build_table(lyt1, adae, adsl) res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) diff --git a/tests/testthat/test-proportions.R b/tests/testthat/test-proportions.R index f2f38601..99db73ef 100644 --- a/tests/testthat/test-proportions.R +++ b/tests/testthat/test-proportions.R @@ -103,9 +103,9 @@ test_that("a_proportion_ci_factor works as expected", { }) test_that("prop_split_fun works as expected", { - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by("ID", split_fun = prop_split_fun) |> - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) expect_snapshot(result) }) diff --git a/tests/testthat/test-pruning_functions.R b/tests/testthat/test-pruning_functions.R index 91695af1..203f5ccf 100644 --- a/tests/testthat/test-pruning_functions.R +++ b/tests/testthat/test-pruning_functions.R @@ -2,13 +2,13 @@ library(dplyr) library(tern) # Pre-processing the table -tab <- basic_table() %>% +tab <- basic_table(round_type = "sas") %>% split_cols_by("ARM") %>% split_rows_by("RACE") %>% split_rows_by("STRATA1") %>% summarize_row_groups() %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) trtvar <- "ARM" ctrl_grp <- "B: Placebo" @@ -125,20 +125,20 @@ testthat::test_that("test keep_non_null_rows", { rcell(NULL, label = "") } - tabsx <- basic_table() %>% + tabsx <- basic_table(round_type = "sas") %>% split_cols_by("ARM") %>% split_rows_by("ARM") %>% analyze("ARM", afun = xnull_cell_fn, show_labels = "hidden") %>% analyze("STRATA1", show_labels = "hidden") %>% - build_table(formatters::DM, round_type = "sas") + 
build_table(formatters::DM) result <- prune_table(tabsx, keep_rows(keep_non_null_rows)) - tabsx2 <- basic_table() %>% + tabsx2 <- basic_table(round_type = "sas") %>% split_cols_by("ARM") %>% split_rows_by("ARM") %>% analyze("STRATA1") %>% - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) expected <- tabsx2 @@ -174,10 +174,10 @@ testthat::test_that("bspt_pruner both fraction and diff_from_control are NULL", }) testthat::test_that("bspt_pruner with fraction", { - tab_bspt_pruner <- basic_table() %>% + tab_bspt_pruner <- basic_table(round_type = "sas") %>% split_cols_by("ARM") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) result <- prune_table( tab_bspt_pruner, @@ -194,10 +194,10 @@ testthat::test_that("bspt_pruner with fraction", { }) testthat::test_that("bspt_pruner with fraction and diff_from_control", { - tab_bspt_pruner <- basic_table() %>% + tab_bspt_pruner <- basic_table(round_type = "sas") %>% split_cols_by("ARM") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) result <- prune_table( tab_bspt_pruner, @@ -216,10 +216,10 @@ testthat::test_that("bspt_pruner with fraction and diff_from_control", { }) testthat::test_that("bspt_pruner with fraction and diff_from_control and keeprowtext", { - tab_bspt_pruner <- basic_table() %>% + tab_bspt_pruner <- basic_table(round_type = "sas") %>% split_cols_by("ARM") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) result1 <- prune_table( tab_bspt_pruner, @@ -270,7 +270,7 @@ testthat::test_that("count_pruner in small groups", { ref_path = ref_path ) - tab_bspt_pruner <- basic_table(show_colcounts = TRUE) %>% + tab_bspt_pruner <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("colspan_trt", split_fun = 
trim_levels_in_group("ARM")) %>% split_cols_by("ARM") %>% split_cols_by("rrisk_header", nested = FALSE) %>% @@ -280,7 +280,7 @@ testthat::test_that("count_pruner in small groups", { split_fun = remove_split_levels("B: Placebo") ) %>% analyze("COUNTRY", afun = a_freq_j, extra_args = extra_args) %>% - build_table(DM_sub, round_type = "sas") + build_table(DM_sub) result <- prune_table( tab_bspt_pruner, @@ -369,7 +369,7 @@ testthat::test_that("bspt_pruner in AE like tables", { .stats = "count_unique_fraction" ) - tbl1 <- basic_table(show_colcounts = TRUE, top_level_section_div = " ") %>% + tbl1 <- basic_table(show_colcounts = TRUE, top_level_section_div = " ", round_type = "sas") %>% split_cols_by("colspan", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM") %>% split_cols_by("rrisk_header", nested = FALSE) %>% @@ -402,7 +402,7 @@ testthat::test_that("bspt_pruner in AE like tables", { show_labels = "hidden", extra_args = extra_args_rr ) %>% - build_table(my_adae, my_adsl, round_type = "sas") + build_table(my_adae, my_adsl) result1 <- safe_prune_table( tbl1, @@ -457,11 +457,11 @@ testthat::test_that("bspt_pruner with less obvious control specifications", { mutate(COUNTRY = factor(as.character(COUNTRY))) %>% mutate(SEX = factor(as.character(SEX))) - tab_bspt_pruner <- basic_table(show_colcounts = TRUE) %>% + tab_bspt_pruner <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% split_cols_by("SEX") %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(DM_sub, round_type = "sas") + build_table(DM_sub) rps_label <- make_row_df(tab_bspt_pruner)$label @@ -513,10 +513,10 @@ testthat::test_that("bspt_pruner with less obvious control specifications", { my_DM <- formatters::DM %>% filter(RACE == "THIS LEAVES EMPTY DF") -my_tab <- basic_table() %>% +my_tab <- basic_table(round_type = "sas") %>% split_cols_by("ARM") %>% analyze("AGE") %>% - build_table(my_DM, round_type = "sas") + build_table(my_DM) 
testthat::test_that("check that if all data is pruned leaving no rows, the outcome is the message", { # create an empty table tree so we can see that safe_prune_table returns the message the user specified diff --git a/tests/testthat/test-relative_risk.R b/tests/testthat/test-relative_risk.R index 35f56ad0..9760a696 100644 --- a/tests/testthat/test-relative_risk.R +++ b/tests/testthat/test-relative_risk.R @@ -341,7 +341,7 @@ test_that("a_relative_risk in table layout gives same results as with SAS", { )) ) - lyt <- basic_table() |> + lyt <- basic_table(round_type = "sas") |> split_cols_by("Treatment") |> analyze( vars = "Response", @@ -354,7 +354,7 @@ test_that("a_relative_risk in table layout gives same results as with SAS", { ) ) - result <- build_table(lyt, dat, round_type = "sas") + result <- build_table(lyt, dat) first_row <- as.list(result[ c("Response", "rel_risk_ci"), c("Treatment", "A") diff --git a/tests/testthat/test-remove_col_count.R b/tests/testthat/test-remove_col_count.R index b11bb5b5..6b79b2fd 100644 --- a/tests/testthat/test-remove_col_count.R +++ b/tests/testthat/test-remove_col_count.R @@ -12,14 +12,15 @@ testthat::test_that("remove_col_count works", { lyt <- basic_table( top_level_section_div = " ", show_colcounts = TRUE, - colcount_format = "N=xx" + colcount_format = "N=xx", + round_type = "sas" ) %>% split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM") %>% split_cols_by("set2", nested = FALSE) %>% split_cols_by("ARM", split_fun = remove_split_levels("B: Placebo")) - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) tbl2 <- remove_col_count(tbl, span_label_var = "set2") diff --git a/tests/testthat/test-resp01_functions.R b/tests/testthat/test-resp01_functions.R index a05a8460..4fbad223 100644 --- a/tests/testthat/test-resp01_functions.R +++ b/tests/testthat/test-resp01_functions.R @@ -5,10 +5,10 @@ test_that("resp01_split_fun_fct 1 works as expected", { method = 
"or_cmh", conf_level = 0.95 ) - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by("ARM", split_fun = add_overall_level("Overall")) %>% split_cols_by("ID", split_fun = split_fun) |> - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) expect_snapshot(result) }) @@ -17,10 +17,10 @@ test_that("resp01_split_fun_fct 2 works as expected", { method = "rr", conf_level = 0.92 ) - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by("ARM", split_fun = add_overall_level("Overall")) %>% split_cols_by("ID", split_fun = split_fun) |> - build_table(formatters::DM, round_type = "sas") + build_table(formatters::DM) expect_snapshot(col_info(result)) }) diff --git a/tests/testthat/test-response_by_var.R b/tests/testthat/test-response_by_var.R index ff418c24..99fb43d3 100644 --- a/tests/testthat/test-response_by_var.R +++ b/tests/testthat/test-response_by_var.R @@ -16,7 +16,7 @@ adsl <- adsl %>% mutate(TRTEMFL = ifelse(is.na(TRTEMFL), "N", "Y")) test_that("response_by_var various scenarios", { - lyt <- basic_table(show_colcounts = TRUE) %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% split_cols_by("ARM") %>% analyze( vars = "SEX", @@ -28,7 +28,7 @@ test_that("response_by_var various scenarios", { ) ## Scenario 1: TRTEMFL has no missing values and values Y/N - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) res1 <- cell_values(tbl[c("SEX", "F"), "A: Drug X"]) res1 <- unlist(unname(res1)) @@ -47,7 +47,7 @@ test_that("response_by_var various scenarios", { adsl2$TRTEMFL <- ifelse(adsl2$TRTEMFL == "Y", "Y", NA) adsl2$TRTEMFL <- factor(adsl2$TRTEMFL, levels = "Y") - tbl2 <- build_table(lyt, adsl2, round_type = "sas") + tbl2 <- build_table(lyt, adsl2) expect_snapshot(tbl2) ## Scenario 3: TRTEMFL has missing values and Y only, and analysis variable has missing values @@ -56,7 +56,7 @@ test_that("response_by_var various scenarios", { 
adsl3$TRTEMFL <- factor(adsl3$TRTEMFL, levels = "Y") adsl3$SEX[1:10] <- NA_character_ - tbl3 <- build_table(lyt, adsl3, round_type = "sas") + tbl3 <- build_table(lyt, adsl3) res3 <- cell_values(tbl3[c("SEX", "F"), "A: Drug X"]) res3 <- unlist(unname(res3)) @@ -75,7 +75,7 @@ test_that("response_by_var various scenarios", { adsl4$SEX[1:10] <- NA_character_ adsl4$TRTEMFL[8:15] <- NA_character_ - tbl4 <- build_table(lyt, adsl4, round_type = "sas") + tbl4 <- build_table(lyt, adsl4) res4 <- cell_values(tbl4[c("SEX", "F"), "A: Drug X"]) res4 <- unlist(unname(res4)) @@ -96,7 +96,7 @@ test_that("response_by_var various scenarios", { levels = c(levels(adsl5$SEX), "extra level") ) - tbl5 <- build_table(lyt, adsl5, round_type = "sas") + tbl5 <- build_table(lyt, adsl5) res5 <- cell_values(tbl5[c("SEX", "extra level"), "A: Drug X"]) res5 <- unlist(unname(res5)) diff --git a/tests/testthat/test-sorting_functions.R b/tests/testthat/test-sorting_functions.R index 1ed50161..a3dd9cfe 100644 --- a/tests/testthat/test-sorting_functions.R +++ b/tests/testthat/test-sorting_functions.R @@ -10,13 +10,13 @@ DM2$spanhead <- factor( levels = c("This is a Spanning Header", " ") ) -tab <- basic_table() %>% +tab <- basic_table(round_type = "sas") %>% split_cols_by("spanhead", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM") %>% split_rows_by("STRATA1") %>% summarize_row_groups() %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(DM2, round_type = "sas") + build_table(DM2) #### Tests for jj_complex_scorefun function #### testthat::test_that("jj_complex_scorefun is identical to standard sorting: spanningheadercolvar=NA", { @@ -131,14 +131,14 @@ testthat::test_that("jj_complex_scorefun places specified category at the end: l testthat::expect_identical(result, expected) }) -tab2 <- basic_table() %>% +tab2 <- basic_table(round_type = "sas") %>% split_cols_by("spanhead", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM") %>% split_cols_by("RACE") 
%>% split_rows_by("STRATA1") %>% summarize_row_groups() %>% analyze_vars("COUNTRY", .stats = "count_fraction") %>% - build_table(DM2, round_type = "sas") + build_table(DM2) testthat::test_that("jj_complex_scorefun uses first column to sort: usefirstcol", { result <- sort_at_path( diff --git a/tests/testthat/test-split_functions.R b/tests/testthat/test-split_functions.R index d0867780..2c22b465 100644 --- a/tests/testthat/test-split_functions.R +++ b/tests/testthat/test-split_functions.R @@ -25,12 +25,13 @@ testthat::test_that("cond_rm_facets works", { lyt <- basic_table( top_level_section_div = " ", show_colcounts = TRUE, - colcount_format = "N=xx" + colcount_format = "N=xx", + round_type = "sas" ) %>% split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% split_cols_by("ARM", split_fun = mysplit) - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) cols <- make_col_df(tbl, visible_only = TRUE)$name expected <- c("A: Drug X", "C: Combination", "Combined", "B: Placebo") @@ -55,11 +56,11 @@ testthat::test_that("rm_levels works", { pre = list(rm_levels(excl = c("JPN", "USA", "NGA"))) ) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) expected <- setdiff(levels(adsl$COUNTRY), c("JPN", "USA", "NGA")) @@ -76,11 +77,11 @@ testthat::test_that("real_add_overall_facet works", { post = list(real_add_overall_facet("Overall", "Overall")) ) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) expected <- c(levels(adsl$COUNTRY), "Overall") @@ -100,11 +101,11 @@ testthat::test_that("make_combo_splitfun works", { levels = c("USA", "CAN") ) - lyt 
<- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) expected <- "Some Combined Countries" @@ -137,11 +138,11 @@ testthat::test_that("combine_nonblank works", { split_fun <- make_split_fun(post = list(combine_nonblank("Overall", "Overall"))) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() # for simplicity - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) expected <- c(levels(adsl$COUNTRY), "Overall") @@ -172,16 +173,16 @@ testthat::test_that("rm_blank_levels works", { pre = list(rm_blank_levels) ) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_rows_by("COUNTRY") %>% summarize_row_groups() - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) row_names_before <- rtables::row.names(tbl) - lyt <- basic_table() %>% + lyt <- basic_table(round_type = "sas") %>% split_rows_by("COUNTRY", split_fun = split_fun) %>% summarize_row_groups() - tbl <- build_table(lyt, adsl, round_type = "sas") + tbl <- build_table(lyt, adsl) row_names_after <- rtables::row.names(tbl) diff --git a/tests/testthat/test-summarize_ancova.R b/tests/testthat/test-summarize_ancova.R index 2a711b9e..a078c8dd 100644 --- a/tests/testthat/test-summarize_ancova.R +++ b/tests/testthat/test-summarize_ancova.R @@ -143,7 +143,7 @@ test_that("s_summarize_ancova works as expected", { }) test_that("a_summarize_ancova_j works as expected in table layout", { - result <- basic_table() %>% + result <- basic_table(round_type = "sas") %>% split_cols_by("Species") %>% add_colcounts() %>% analyze( @@ -188,6 +188,6 @@ test_that("a_summarize_ancova_j works as expected in table layout", { ) ) ) %>% - build_table(iris, round_type = "sas") + 
build_table(iris) expect_snapshot(result) }) From c01ecc21a1f9b020b553a9e2d0dce18d431c1897 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 12:47:32 +0000 Subject: [PATCH 17/49] document --- NAMESPACE | 1 + man/jjcsformat_xx.Rd | 6 +++--- man/rbmi_analyse.Rd | 2 +- man/tt_to_tlgrtf.Rd | 4 ++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/NAMESPACE b/NAMESPACE index 64970005..02778548 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -42,6 +42,7 @@ export(create_colspan_var) export(d_test_proportion_diff_j) export(def_colwidths) export(default_str_map) +export(do_exclude_split) export(find_missing_chg_after_avisit) export(fit_ancova) export(fit_mmrm_j) diff --git a/man/jjcsformat_xx.Rd b/man/jjcsformat_xx.Rd index 3b6892bd..70e60700 100644 --- a/man/jjcsformat_xx.Rd +++ b/man/jjcsformat_xx.Rd @@ -69,7 +69,7 @@ is.function(fmt) fmt format_value(value[1], fmt, round_type = "sas") format_value(value[1], fmt, round_type = "iec") -fmt(value[1]) +if (is.function(fmt)) fmt(value[1]) fmt2 <- jjcsformat_xx("xx.x (xx.xxx)") is.function(fmt2) @@ -77,11 +77,11 @@ value <- c(1.65, 8.645) format_value(value, fmt2, round_type = "sas") format_value(value, fmt2, round_type = "iec") # only possible when resulting format is a function -fmt2(value, round_type = "sas") +if (is.function(fmt2)) fmt2(value, round_type = "sas") value <- c(1.65, NA) format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) -fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) +if (is.function(fmt2)) fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) my_pval_format <- jjcsformat_pval_fct(0.005) my_pval_format(0.2802359) my_pval_format(0.0048) diff --git a/man/rbmi_analyse.Rd b/man/rbmi_analyse.Rd index 5f3d8613..ec8c6b91 100644 --- a/man/rbmi_analyse.Rd +++ b/man/rbmi_analyse.Rd @@ -61,7 +61,7 @@ via \code{...}. 
\code{fun} must return a named list with each element itself being a list containing a single numeric element called \code{est} (or additionally \code{se} and \code{df} if -you had originally specified \code{\link[rbmi:method_bayes]{rbmi::method_bayes()}} or \code{\link[rbmi:method_approxbayes]{rbmi::method_approxbayes()}}) +you had originally specified \code{\link[rbmi:method]{rbmi::method_bayes()}} or \code{\link[rbmi:method]{rbmi::method_approxbayes()}}) i.e.: \preformatted{ myfun <- function(dat, ...) { diff --git a/man/tt_to_tlgrtf.Rd b/man/tt_to_tlgrtf.Rd index 61ee195f..ef27835c 100644 --- a/man/tt_to_tlgrtf.Rd +++ b/man/tt_to_tlgrtf.Rd @@ -26,7 +26,7 @@ tt_to_tlgrtf( combined_rtf = FALSE, one_table = TRUE, border_mat = make_header_bordmat(obj = tt), - round_type = NULL, + round_type = obj_round_type(tt), validate = TRUE, ... ) @@ -87,7 +87,7 @@ default behavior does not meet their needs.} \item{round_type}{(\code{"iec"} or \code{"sas"})\cr the type of rounding to perform. iec, the default, peforms rounding compliant with IEC 60559, while sas performs nearest-value rounding consistent with rounding within SAS. -See \code{?formatters::format_value} for more details.} +See \verb{[formatters::format_value()]} for more details.} \item{validate}{logical(1). Whether to validate the table structure using \code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. 
This can also be disabled From a8a7080385313a57447a649bbb234ea2ad5ef7ae Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 12:48:51 +0000 Subject: [PATCH 18/49] code update + styler --- R/a_freq_j.R | 217 +++++++++---------- R/cmhrms.R | 25 +-- R/h_freq_funs.R | 89 ++++---- R/jjcsformats.R | 19 +- R/junco_utils_default_stats_formats_labels.R | 18 +- R/split_functions.R | 17 +- R/tabulate_lsmeans_wide.R | 54 +++-- R/tt_to_tblfile.R | 91 ++++---- 8 files changed, 254 insertions(+), 276 deletions(-) diff --git a/R/a_freq_j.R b/R/a_freq_j.R index 8ddcfa81..c5c654fb 100644 --- a/R/a_freq_j.R +++ b/R/a_freq_j.R @@ -75,19 +75,18 @@ #' @export #' @importFrom stats setNames s_freq_j <- function( - df, - .var, - .df_row, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - alt_df, - parent_df, - id = "USUBJID", - denom = c("n_df", "n_altdf", "N_col", "n_rowdf", "n_parentdf"), - .N_col, - countsource = c("df", "altdf") -) { + df, + .var, + .df_row, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + alt_df, + parent_df, + id = "USUBJID", + denom = c("n_df", "n_altdf", "N_col", "n_rowdf", "n_parentdf"), + .N_col, + countsource = c("df", "altdf")) { if (is.na(.var) || is.null(.var)) { stop("Argument .var cannot be NA or NULL.") } @@ -206,19 +205,18 @@ s_freq_j <- function( } s_rel_risk_levii_j <- function( - levii, - df, - .var, - ref_df, - ref_denom_df, - .in_ref_col, - curgrp_denom_df, - id, - variables, - conf_level, - method, - weights_method -) { + levii, + df, + .var, + ref_df, + ref_denom_df, + .in_ref_col, + curgrp_denom_df, + id, + variables, + conf_level, + method, + weights_method) { dfii <- df[df[[.var]] == levii & !is.na(df[[.var]]), ] ref_dfii <- ref_df[ref_df[[.var]] == levii & !is.na(ref_df[[.var]]), ] @@ -250,32 +248,31 @@ s_rel_risk_levii_j <- function( s_rel_risk_val_j <- function( - df, - .var, - .df_row, - ctrl_grp, - cur_trt_grp, - trt_var, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - denom_df, - id = "USUBJID", 
- riskdiff = TRUE, - variables = list(strata = NULL), - conf_level = 0.95, - method = c( - "waldcc", - "wald", - "cmh", - "ha", - "newcombe", - "newcombecc", - "strat_newcombe", - "strat_newcombecc" - ), - weights_method = "cmh" -) { + df, + .var, + .df_row, + ctrl_grp, + cur_trt_grp, + trt_var, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + denom_df, + id = "USUBJID", + riskdiff = TRUE, + variables = list(strata = NULL), + conf_level = 0.95, + method = c( + "waldcc", + "wald", + "cmh", + "ha", + "newcombe", + "newcombecc", + "strat_newcombe", + "strat_newcombecc" + ), + weights_method = "cmh") { if (drop_levels) { obs_levs <- unique(.df_row[[.var]]) obs_levs <- intersect(levels(.df_row[[.var]]), obs_levs) @@ -587,51 +584,50 @@ s_rel_risk_val_j <- function( #' the statistic is replaced by the relative risk difference + confidence interval. #' @export a_freq_j <- function( - df, - labelstr = NULL, - .var = NA, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - new_levels = NULL, - new_levels_after = FALSE, - addstr2levs = NULL, - .df_row, - .spl_context, - .N_col, - id = "USUBJID", - denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), - riskdiff = TRUE, - ref_path = NULL, - variables = list(strata = NULL), - conf_level = 0.95, - method = c( - "wald", - "waldcc", - "cmh", - "ha", - "newcombe", - "newcombecc", - "strat_newcombe", - "strat_newcombecc" - ), - weights_method = "cmh", - label = NULL, - label_fstr = NULL, - label_map = NULL, - .alt_df_full = NULL, - denom_by = NULL, - .stats = c("count_unique_denom_fraction"), - .formats = NULL, - .indent_mods = NULL, - na_str = rep("NA", 3), - .labels_n = NULL, - extrablankline = FALSE, - extrablanklineafter = NULL, - restr_columns = NULL, - colgroup = NULL, - countsource = c("df", "altdf") -) { + df, + labelstr = NULL, + .var = NA, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + new_levels = NULL, + new_levels_after = FALSE, + addstr2levs = NULL, + .df_row, + 
.spl_context, + .N_col, + id = "USUBJID", + denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), + riskdiff = TRUE, + ref_path = NULL, + variables = list(strata = NULL), + conf_level = 0.95, + method = c( + "wald", + "waldcc", + "cmh", + "ha", + "newcombe", + "newcombecc", + "strat_newcombe", + "strat_newcombecc" + ), + weights_method = "cmh", + label = NULL, + label_fstr = NULL, + label_map = NULL, + .alt_df_full = NULL, + denom_by = NULL, + .stats = c("count_unique_denom_fraction"), + .formats = NULL, + .indent_mods = NULL, + na_str = rep("NA", 3), + .labels_n = NULL, + extrablankline = FALSE, + extrablanklineafter = NULL, + restr_columns = NULL, + colgroup = NULL, + countsource = c("df", "altdf")) { denom <- match.arg(denom) method <- match.arg(method) @@ -890,20 +886,19 @@ a_freq_j <- function( #' `exclude_levels` argument, see `?do_exclude_split` for details. #' @export a_freq_j_with_exclude <- function( - df, - labelstr = NULL, - exclude_levels, - .var = NA, - .spl_context, - .df_row, - .N_col, - .alt_df_full = NULL, - .stats = "count_unique_denom_fraction", - .formats = NULL, - .indent_mods = NULL, - .labels_n = NULL, - ... -) { + df, + labelstr = NULL, + exclude_levels, + .var = NA, + .spl_context, + .df_row, + .N_col, + .alt_df_full = NULL, + .stats = "count_unique_denom_fraction", + .formats = NULL, + .indent_mods = NULL, + .labels_n = NULL, + ...) 
{ if (do_exclude_split(exclude_levels, .spl_context)) { NULL } else { diff --git a/R/cmhrms.R b/R/cmhrms.R index 584a4a4a..5f706b31 100644 --- a/R/cmhrms.R +++ b/R/cmhrms.R @@ -148,19 +148,18 @@ a_cmhrms_j <- function(df, .var, #' @export #' @order 3 a_cmhrms_j_with_exclude <- function( - df, - exclude_levels, - .var, - .spl_context, - .ref_group, - .in_ref_col, - .df_row, - ..., - .stats = NULL, - .formats = NULL, - .indent_mods = NULL, - .labels = NULL -) { + df, + exclude_levels, + .var, + .spl_context, + .ref_group, + .in_ref_col, + .df_row, + ..., + .stats = NULL, + .formats = NULL, + .indent_mods = NULL, + .labels = NULL) { if (do_exclude_split(exclude_levels, .spl_context)) { NULL } else { diff --git a/R/h_freq_funs.R b/R/h_freq_funs.R index 87c727ec..18763157 100644 --- a/R/h_freq_funs.R +++ b/R/h_freq_funs.R @@ -332,20 +332,19 @@ h_get_trtvar_refpath <- function(ref_path, .spl_context, df) { #' @noRd #' @keywords internal h_upd_dfrow <- function( - df_row, - .var, - val, - excl_levels, - drop_levels, - new_levels, - new_levels_after, - addstr2levs, - label, - label_map, - labelstr, - label_fstr, - .spl_context -) { + df_row, + .var, + val, + excl_levels, + drop_levels, + new_levels, + new_levels_after, + addstr2levs, + label, + label_map, + labelstr, + label_fstr, + .spl_context) { if (!is.null(label) && !is.null(label_map)) { stop("a_freq_j: label and label_map cannot be used together.") } @@ -528,28 +527,27 @@ h_get_label_map <- function(.labels, label_map, .var, split_info) { #' @param .stats Statistics to compute. #' @return List containing prepared data frames and values. 
h_a_freq_dataprep <- function( - df, - labelstr = NULL, - .var = NA, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - new_levels = NULL, - new_levels_after = FALSE, - addstr2levs = NULL, - .df_row, - .spl_context, - .N_col, - id = "USUBJID", - denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), - variables, - label = NULL, - label_fstr = NULL, - label_map = NULL, - .alt_df_full = NULL, - denom_by = NULL, - .stats -) { + df, + labelstr = NULL, + .var = NA, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + new_levels = NULL, + new_levels_after = FALSE, + addstr2levs = NULL, + .df_row, + .spl_context, + .N_col, + id = "USUBJID", + denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), + variables, + label = NULL, + label_fstr = NULL, + label_map = NULL, + .alt_df_full = NULL, + denom_by = NULL, + .stats) { denom <- match.arg(denom) df <- df[!is.na(df[[.var]]), ] @@ -636,16 +634,15 @@ h_a_freq_dataprep <- function( #' @noRd #' @keywords internal h_a_freq_prepinrows <- function( - x_stats, - .stats_adj, - .formats, - labelstr, - label_fstr, - label, - .indent_mods, - .labels_n, - na_str -) { + x_stats, + .stats_adj, + .formats, + labelstr, + label_fstr, + label, + .indent_mods, + .labels_n, + na_str) { # Fill in formatting defaults x_stats <- x_stats[.stats_adj] diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 7ac534d1..7bc06f05 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -23,7 +23,7 @@ #' fmt #' format_value(value[1], fmt, round_type = "sas") #' format_value(value[1], fmt, round_type = "iec") -#' fmt(value[1]) +#' if (is.function(fmt)) fmt(value[1]) #' #' fmt2 <- jjcsformat_xx("xx.x (xx.xxx)") #' is.function(fmt2) @@ -31,17 +31,16 @@ #' format_value(value, fmt2, round_type = "sas") #' format_value(value, fmt2, round_type = "iec") #' # only possible when resulting format is a function -#' fmt2(value, round_type = "sas") +#' if (is.function(fmt2)) fmt2(value, round_type = "sas") #' #' 
value <- c(1.65, NA) #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) -#' fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) +#' if (is.function(fmt2)) fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) jjcsformat_xx <- function( - str, - na_str = na_str_dflt, - na_str_dflt = "NE", - replace_na_dflt = TRUE -) { + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE) { if (grepl("xxx.", str, fixed = TRUE)) { stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") } @@ -271,7 +270,7 @@ jjcsformat_fraction_count_denom <- jjcsformat_cnt_den_fract_fct(type = "fraction jjcsformat_pval_fct <- function(alpha = 0.05) { checkmate::assert_number(alpha, lower = 0, upper = 1) - function(x, round_type = c("iec", "sas"), na_str, ...) { + function(x, round_type = valid_round_type, na_str, ...) { round_type <- match.arg(round_type) checkmate::assert_number( x, @@ -329,7 +328,7 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { jjcsformat_range_fct <- function(str) { format_xx <- jjcsformat_xx(str) - function(x, output, round_type = c("iec", "sas"), ...) { + function(x, output, round_type = valid_round_type, ...) 
{ round_type <- match.arg(round_type) checkmate::assert_numeric( x, diff --git a/R/junco_utils_default_stats_formats_labels.R b/R/junco_utils_default_stats_formats_labels.R index c75a76b8..fa1345cc 100644 --- a/R/junco_utils_default_stats_formats_labels.R +++ b/R/junco_utils_default_stats_formats_labels.R @@ -46,11 +46,10 @@ NULL #' #' @export junco_get_stats <- function( - method_groups = "analyze_vars_numeric", - stats_in = NULL, - custom_stats_in = NULL, - add_pval = FALSE -) { + method_groups = "analyze_vars_numeric", + stats_in = NULL, + custom_stats_in = NULL, + add_pval = FALSE) { tern_get_stats( method_groups = method_groups, stats_in = stats_in, @@ -97,11 +96,10 @@ junco_get_formats_from_stats <- function(stats, formats_in = NULL, levels_per_st #' #' @export junco_get_labels_from_stats <- function( - stats, - labels_in = NULL, - levels_per_stats = NULL, - label_attr_from_stats = NULL -) { + stats, + labels_in = NULL, + levels_per_stats = NULL, + label_attr_from_stats = NULL) { tern_get_labels_from_stats( stats = stats, labels_in = labels_in, diff --git a/R/split_functions.R b/R/split_functions.R index 8918538f..a47dd450 100644 --- a/R/split_functions.R +++ b/R/split_functions.R @@ -233,15 +233,14 @@ resolve_ancestor_pos <- function(anc_pos, numrows) { #' #' stopifnot(identical(cell_values(tbl2), cell_values(tbl3))) cond_rm_facets <- function( - facets = NULL, - facets_regex = NULL, - ancestor_pos = 1, - split = NULL, - split_regex = NULL, - value = NULL, - value_regex = NULL, - keep_matches = FALSE -) { + facets = NULL, + facets_regex = NULL, + ancestor_pos = 1, + split = NULL, + split_regex = NULL, + value = NULL, + value_regex = NULL, + keep_matches = FALSE) { ## detect errors/miscalling which don't even require us to have the facets if (is.null(split) && is.null(split_regex) && is.null(value) && is.null(value_regex)) { stop( diff --git a/R/tabulate_lsmeans_wide.R b/R/tabulate_lsmeans_wide.R index c455f996..87d9b774 100644 --- a/R/tabulate_lsmeans_wide.R 
+++ b/R/tabulate_lsmeans_wide.R @@ -125,16 +125,15 @@ lsmeans_wide_second_split_fun_fct <- function(pval_sided, conf_level, include_pv #' #' @keywords internal lsmeans_wide_cfun <- function( - df, - labelstr, - .spl_context, - variables, - ref_level, - treatment_levels, - pval_sided = c("2", "1", "-1"), - conf_level, - formats -) { + df, + labelstr, + .spl_context, + variables, + ref_level, + treatment_levels, + pval_sided = c("2", "1", "-1"), + conf_level, + formats) { this_col_split <- .spl_context[nrow(.spl_context), "cur_col_split_val"][[1]] pad_in_rows <- pad_in_rows_fct(length_out = length(treatment_levels), label = labelstr) if (this_col_split[1] %in% c("reference_group", "testing_group")) { @@ -222,24 +221,23 @@ lsmeans_wide_cfun <- function( #' ) |> #' build_table(df = anl) summarize_lsmeans_wide <- function( - lyt, - variables, - ref_level, - treatment_levels, - conf_level, - pval_sided = "2", - include_variance = TRUE, - include_pval = TRUE, - formats = list( - lsmean = jjcsformat_xx("xx.x"), - mse = jjcsformat_xx("xx.x"), - df = jjcsformat_xx("xx."), - lsmean_diff = jjcsformat_xx("xx.x"), - se = jjcsformat_xx("xx.xx"), - ci = jjcsformat_xx("(xx.xx, xx.xx)"), - pval = jjcsformat_pval_fct(0) - ) -) { + lyt, + variables, + ref_level, + treatment_levels, + conf_level, + pval_sided = "2", + include_variance = TRUE, + include_pval = TRUE, + formats = list( + lsmean = jjcsformat_xx("xx.x"), + mse = jjcsformat_xx("xx.x"), + df = jjcsformat_xx("xx."), + lsmean_diff = jjcsformat_xx("xx.x"), + se = jjcsformat_xx("xx.xx"), + ci = jjcsformat_xx("(xx.xx, xx.xx)"), + pval = jjcsformat_pval_fct(0) + )) { # Check that all required format elements are present in the formats parameter checkmate::assert_names( names(formats), diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 3f6d7c26..2f7f545e 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -7,11 +7,10 @@ #' @return `tt` represented as a `tbl` data.frame suitable for passing #' to [tidytlg::gentlg] via the 
`huxme` argument. tt_to_tbldf <- function( - tt, - fontspec = font_spec("Times", 9L, 1), - string_map = default_str_map, - markup_df = dps_markup_df -) { + tt, + fontspec = font_spec("Times", 9L, 1), + string_map = default_str_map, + markup_df = dps_markup_df) { if (!validate_table_struct(tt)) { stop( "invalid table structure. summarize_row_groups without ", @@ -109,10 +108,9 @@ tlg_type <- function(tt) { } mpf_to_colspan <- function( - mpf, - string_map = default_str_map, - markup_df = dps_markup_df -) { + mpf, + string_map = default_str_map, + markup_df = dps_markup_df) { if (!methods::is(mpf, "MatrixPrintForm")) { stop("figure out how to make this an mpf (MatrixPrintForm) first.") } @@ -254,7 +252,7 @@ get_ncol <- function(tt) { #' @param round_type (`"iec"` or `"sas"`)\cr the type of rounding to perform. iec, #' the default, peforms rounding compliant with IEC 60559, while #' sas performs nearest-value rounding consistent with rounding within SAS. -#' See `?formatters::format_value` for more details. +#' See `[formatters::format_value()]` for more details. #' @param validate logical(1). Whether to validate the table structure using #' `rtables::validate_table_struct()`. Defaults to `TRUE`. This can also be disabled #' globally by setting the environment variable `JUNCO_DISABLE_VALIDATION=TRUE`. @@ -273,39 +271,38 @@ get_ncol <- function(tt) { #' @return If `file` is non-NULL, this is called for the side-effect of writing #' one or more RTF files. Otherwise, returns a list of `huxtable` objects. 
tt_to_tlgrtf <- function( - tt, - file = NULL, - orientation = c("portrait", "landscape"), - colwidths = def_colwidths( tt, - fontspec, - col_gap = col_gap, - label_width_ins = label_width_ins, - type = tlgtype - ), - label_width_ins = 2, - watermark = NULL, - pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), - fontspec = font_spec("Times", 9L, 1.2), - pg_width = pg_width_by_orient(orientation == "landscape"), - margins = c(0, 0, 0, 0), - paginate = tlg_type(tt) == "Table", - col_gap = ifelse(tlgtype == "Listing", .5, 3), - nosplitin = list( - row = character(), - col = character() - ), - verbose = FALSE, - tlgtype = tlg_type(tt), - string_map = default_str_map, - markup_df = dps_markup_df, - combined_rtf = FALSE, - one_table = TRUE, - border_mat = make_header_bordmat(obj = tt), - round_type = NULL, - validate = TRUE, - ... -) { + file = NULL, + orientation = c("portrait", "landscape"), + colwidths = def_colwidths( + tt, + fontspec, + col_gap = col_gap, + label_width_ins = label_width_ins, + type = tlgtype + ), + label_width_ins = 2, + watermark = NULL, + pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), + fontspec = font_spec("Times", 9L, 1.2), + pg_width = pg_width_by_orient(orientation == "landscape"), + margins = c(0, 0, 0, 0), + paginate = tlg_type(tt) == "Table", + col_gap = ifelse(tlgtype == "Listing", .5, 3), + nosplitin = list( + row = character(), + col = character() + ), + verbose = FALSE, + tlgtype = tlg_type(tt), + string_map = default_str_map, + markup_df = dps_markup_df, + combined_rtf = FALSE, + one_table = TRUE, + border_mat = make_header_bordmat(obj = tt), + round_type = obj_round_type(tt), + validate = TRUE, + ...) 
{ # Validate table structure if requested and not disabled by environment variable # nolint start if (validate && tlgtype == "Table" && methods::is(tt, "VTableTree") && @@ -324,9 +321,6 @@ tt_to_tlgrtf <- function( on.exit(close_font_dev()) } - if (is.null(round_type) && tlgtype == "Table") { - round_type <- round_type(tt) - } if (tlgtype == "Listing" && nrow(tt) == 0) { dat <- as.list(c("No data to report", rep("", ncol(tt) - 1))) names(dat) <- names(tt) @@ -384,7 +378,7 @@ tt_to_tlgrtf <- function( } else if (methods::is(tt, "list") && methods::is(tt[[1]], "MatrixPrintForm")) { hdrmpf <- tt[[1]] } else { - hrdmpf <- tt + hdrmpf <- tt } pags <- paginate_to_mpfs( @@ -711,9 +705,8 @@ fixup_bord_mat <- function(brdmat, hstrs) { } .make_header_bordmat <- function( - obj, - mpf = matrix_form(utils::head(obj, 1), expand_newlines = FALSE) -) { + obj, + mpf = matrix_form(utils::head(obj, 1), expand_newlines = FALSE)) { spns <- mf_spans(mpf) nlh <- mf_nlheader(mpf) nrh <- mf_nrheader(mpf) From f97632583bba3dd57799cceb3efec7186919f398 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 12:55:56 +0000 Subject: [PATCH 19/49] [skip style] [skip vbump] Restyle files --- R/a_freq_j.R | 217 ++++++++++--------- R/cmhrms.R | 25 ++- R/h_freq_funs.R | 89 ++++---- R/jjcsformats.R | 9 +- R/junco_utils_default_stats_formats_labels.R | 18 +- R/split_functions.R | 17 +- R/tabulate_lsmeans_wide.R | 54 ++--- R/tt_to_tblfile.R | 84 +++---- 8 files changed, 266 insertions(+), 247 deletions(-) diff --git a/R/a_freq_j.R b/R/a_freq_j.R index c5c654fb..8ddcfa81 100644 --- a/R/a_freq_j.R +++ b/R/a_freq_j.R @@ -75,18 +75,19 @@ #' @export #' @importFrom stats setNames s_freq_j <- function( - df, - .var, - .df_row, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - alt_df, - parent_df, - id = "USUBJID", - denom = c("n_df", "n_altdf", "N_col", "n_rowdf", "n_parentdf"), - .N_col, - countsource = c("df", "altdf")) { + df, 
+ .var, + .df_row, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + alt_df, + parent_df, + id = "USUBJID", + denom = c("n_df", "n_altdf", "N_col", "n_rowdf", "n_parentdf"), + .N_col, + countsource = c("df", "altdf") +) { if (is.na(.var) || is.null(.var)) { stop("Argument .var cannot be NA or NULL.") } @@ -205,18 +206,19 @@ s_freq_j <- function( } s_rel_risk_levii_j <- function( - levii, - df, - .var, - ref_df, - ref_denom_df, - .in_ref_col, - curgrp_denom_df, - id, - variables, - conf_level, - method, - weights_method) { + levii, + df, + .var, + ref_df, + ref_denom_df, + .in_ref_col, + curgrp_denom_df, + id, + variables, + conf_level, + method, + weights_method +) { dfii <- df[df[[.var]] == levii & !is.na(df[[.var]]), ] ref_dfii <- ref_df[ref_df[[.var]] == levii & !is.na(ref_df[[.var]]), ] @@ -248,31 +250,32 @@ s_rel_risk_levii_j <- function( s_rel_risk_val_j <- function( - df, - .var, - .df_row, - ctrl_grp, - cur_trt_grp, - trt_var, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - denom_df, - id = "USUBJID", - riskdiff = TRUE, - variables = list(strata = NULL), - conf_level = 0.95, - method = c( - "waldcc", - "wald", - "cmh", - "ha", - "newcombe", - "newcombecc", - "strat_newcombe", - "strat_newcombecc" - ), - weights_method = "cmh") { + df, + .var, + .df_row, + ctrl_grp, + cur_trt_grp, + trt_var, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + denom_df, + id = "USUBJID", + riskdiff = TRUE, + variables = list(strata = NULL), + conf_level = 0.95, + method = c( + "waldcc", + "wald", + "cmh", + "ha", + "newcombe", + "newcombecc", + "strat_newcombe", + "strat_newcombecc" + ), + weights_method = "cmh" +) { if (drop_levels) { obs_levs <- unique(.df_row[[.var]]) obs_levs <- intersect(levels(.df_row[[.var]]), obs_levs) @@ -584,50 +587,51 @@ s_rel_risk_val_j <- function( #' the statistic is replaced by the relative risk difference + confidence interval. 
#' @export a_freq_j <- function( - df, - labelstr = NULL, - .var = NA, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - new_levels = NULL, - new_levels_after = FALSE, - addstr2levs = NULL, - .df_row, - .spl_context, - .N_col, - id = "USUBJID", - denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), - riskdiff = TRUE, - ref_path = NULL, - variables = list(strata = NULL), - conf_level = 0.95, - method = c( - "wald", - "waldcc", - "cmh", - "ha", - "newcombe", - "newcombecc", - "strat_newcombe", - "strat_newcombecc" - ), - weights_method = "cmh", - label = NULL, - label_fstr = NULL, - label_map = NULL, - .alt_df_full = NULL, - denom_by = NULL, - .stats = c("count_unique_denom_fraction"), - .formats = NULL, - .indent_mods = NULL, - na_str = rep("NA", 3), - .labels_n = NULL, - extrablankline = FALSE, - extrablanklineafter = NULL, - restr_columns = NULL, - colgroup = NULL, - countsource = c("df", "altdf")) { + df, + labelstr = NULL, + .var = NA, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + new_levels = NULL, + new_levels_after = FALSE, + addstr2levs = NULL, + .df_row, + .spl_context, + .N_col, + id = "USUBJID", + denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), + riskdiff = TRUE, + ref_path = NULL, + variables = list(strata = NULL), + conf_level = 0.95, + method = c( + "wald", + "waldcc", + "cmh", + "ha", + "newcombe", + "newcombecc", + "strat_newcombe", + "strat_newcombecc" + ), + weights_method = "cmh", + label = NULL, + label_fstr = NULL, + label_map = NULL, + .alt_df_full = NULL, + denom_by = NULL, + .stats = c("count_unique_denom_fraction"), + .formats = NULL, + .indent_mods = NULL, + na_str = rep("NA", 3), + .labels_n = NULL, + extrablankline = FALSE, + extrablanklineafter = NULL, + restr_columns = NULL, + colgroup = NULL, + countsource = c("df", "altdf") +) { denom <- match.arg(denom) method <- match.arg(method) @@ -886,19 +890,20 @@ a_freq_j <- function( #' `exclude_levels` argument, 
see `?do_exclude_split` for details. #' @export a_freq_j_with_exclude <- function( - df, - labelstr = NULL, - exclude_levels, - .var = NA, - .spl_context, - .df_row, - .N_col, - .alt_df_full = NULL, - .stats = "count_unique_denom_fraction", - .formats = NULL, - .indent_mods = NULL, - .labels_n = NULL, - ...) { + df, + labelstr = NULL, + exclude_levels, + .var = NA, + .spl_context, + .df_row, + .N_col, + .alt_df_full = NULL, + .stats = "count_unique_denom_fraction", + .formats = NULL, + .indent_mods = NULL, + .labels_n = NULL, + ... +) { if (do_exclude_split(exclude_levels, .spl_context)) { NULL } else { diff --git a/R/cmhrms.R b/R/cmhrms.R index 5f706b31..584a4a4a 100644 --- a/R/cmhrms.R +++ b/R/cmhrms.R @@ -148,18 +148,19 @@ a_cmhrms_j <- function(df, .var, #' @export #' @order 3 a_cmhrms_j_with_exclude <- function( - df, - exclude_levels, - .var, - .spl_context, - .ref_group, - .in_ref_col, - .df_row, - ..., - .stats = NULL, - .formats = NULL, - .indent_mods = NULL, - .labels = NULL) { + df, + exclude_levels, + .var, + .spl_context, + .ref_group, + .in_ref_col, + .df_row, + ..., + .stats = NULL, + .formats = NULL, + .indent_mods = NULL, + .labels = NULL +) { if (do_exclude_split(exclude_levels, .spl_context)) { NULL } else { diff --git a/R/h_freq_funs.R b/R/h_freq_funs.R index 18763157..87c727ec 100644 --- a/R/h_freq_funs.R +++ b/R/h_freq_funs.R @@ -332,19 +332,20 @@ h_get_trtvar_refpath <- function(ref_path, .spl_context, df) { #' @noRd #' @keywords internal h_upd_dfrow <- function( - df_row, - .var, - val, - excl_levels, - drop_levels, - new_levels, - new_levels_after, - addstr2levs, - label, - label_map, - labelstr, - label_fstr, - .spl_context) { + df_row, + .var, + val, + excl_levels, + drop_levels, + new_levels, + new_levels_after, + addstr2levs, + label, + label_map, + labelstr, + label_fstr, + .spl_context +) { if (!is.null(label) && !is.null(label_map)) { stop("a_freq_j: label and label_map cannot be used together.") } @@ -527,27 +528,28 @@ 
h_get_label_map <- function(.labels, label_map, .var, split_info) { #' @param .stats Statistics to compute. #' @return List containing prepared data frames and values. h_a_freq_dataprep <- function( - df, - labelstr = NULL, - .var = NA, - val = NULL, - drop_levels = FALSE, - excl_levels = NULL, - new_levels = NULL, - new_levels_after = FALSE, - addstr2levs = NULL, - .df_row, - .spl_context, - .N_col, - id = "USUBJID", - denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), - variables, - label = NULL, - label_fstr = NULL, - label_map = NULL, - .alt_df_full = NULL, - denom_by = NULL, - .stats) { + df, + labelstr = NULL, + .var = NA, + val = NULL, + drop_levels = FALSE, + excl_levels = NULL, + new_levels = NULL, + new_levels_after = FALSE, + addstr2levs = NULL, + .df_row, + .spl_context, + .N_col, + id = "USUBJID", + denom = c("N_col", "n_df", "n_altdf", "N_colgroup", "n_rowdf", "n_parentdf"), + variables, + label = NULL, + label_fstr = NULL, + label_map = NULL, + .alt_df_full = NULL, + denom_by = NULL, + .stats +) { denom <- match.arg(denom) df <- df[!is.na(df[[.var]]), ] @@ -634,15 +636,16 @@ h_a_freq_dataprep <- function( #' @noRd #' @keywords internal h_a_freq_prepinrows <- function( - x_stats, - .stats_adj, - .formats, - labelstr, - label_fstr, - label, - .indent_mods, - .labels_n, - na_str) { + x_stats, + .stats_adj, + .formats, + labelstr, + label_fstr, + label, + .indent_mods, + .labels_n, + na_str +) { # Fill in formatting defaults x_stats <- x_stats[.stats_adj] diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 7bc06f05..8f393caf 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -37,10 +37,11 @@ #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) #' if (is.function(fmt2)) fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) jjcsformat_xx <- function( - str, - na_str = na_str_dflt, - na_str_dflt = "NE", - replace_na_dflt = TRUE) { + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt 
= TRUE +) { if (grepl("xxx.", str, fixed = TRUE)) { stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") } diff --git a/R/junco_utils_default_stats_formats_labels.R b/R/junco_utils_default_stats_formats_labels.R index fa1345cc..c75a76b8 100644 --- a/R/junco_utils_default_stats_formats_labels.R +++ b/R/junco_utils_default_stats_formats_labels.R @@ -46,10 +46,11 @@ NULL #' #' @export junco_get_stats <- function( - method_groups = "analyze_vars_numeric", - stats_in = NULL, - custom_stats_in = NULL, - add_pval = FALSE) { + method_groups = "analyze_vars_numeric", + stats_in = NULL, + custom_stats_in = NULL, + add_pval = FALSE +) { tern_get_stats( method_groups = method_groups, stats_in = stats_in, @@ -96,10 +97,11 @@ junco_get_formats_from_stats <- function(stats, formats_in = NULL, levels_per_st #' #' @export junco_get_labels_from_stats <- function( - stats, - labels_in = NULL, - levels_per_stats = NULL, - label_attr_from_stats = NULL) { + stats, + labels_in = NULL, + levels_per_stats = NULL, + label_attr_from_stats = NULL +) { tern_get_labels_from_stats( stats = stats, labels_in = labels_in, diff --git a/R/split_functions.R b/R/split_functions.R index a47dd450..8918538f 100644 --- a/R/split_functions.R +++ b/R/split_functions.R @@ -233,14 +233,15 @@ resolve_ancestor_pos <- function(anc_pos, numrows) { #' #' stopifnot(identical(cell_values(tbl2), cell_values(tbl3))) cond_rm_facets <- function( - facets = NULL, - facets_regex = NULL, - ancestor_pos = 1, - split = NULL, - split_regex = NULL, - value = NULL, - value_regex = NULL, - keep_matches = FALSE) { + facets = NULL, + facets_regex = NULL, + ancestor_pos = 1, + split = NULL, + split_regex = NULL, + value = NULL, + value_regex = NULL, + keep_matches = FALSE +) { ## detect errors/miscalling which don't even require us to have the facets if (is.null(split) && is.null(split_regex) && is.null(value) && is.null(value_regex)) { stop( diff --git a/R/tabulate_lsmeans_wide.R 
b/R/tabulate_lsmeans_wide.R index 87d9b774..c455f996 100644 --- a/R/tabulate_lsmeans_wide.R +++ b/R/tabulate_lsmeans_wide.R @@ -125,15 +125,16 @@ lsmeans_wide_second_split_fun_fct <- function(pval_sided, conf_level, include_pv #' #' @keywords internal lsmeans_wide_cfun <- function( - df, - labelstr, - .spl_context, - variables, - ref_level, - treatment_levels, - pval_sided = c("2", "1", "-1"), - conf_level, - formats) { + df, + labelstr, + .spl_context, + variables, + ref_level, + treatment_levels, + pval_sided = c("2", "1", "-1"), + conf_level, + formats +) { this_col_split <- .spl_context[nrow(.spl_context), "cur_col_split_val"][[1]] pad_in_rows <- pad_in_rows_fct(length_out = length(treatment_levels), label = labelstr) if (this_col_split[1] %in% c("reference_group", "testing_group")) { @@ -221,23 +222,24 @@ lsmeans_wide_cfun <- function( #' ) |> #' build_table(df = anl) summarize_lsmeans_wide <- function( - lyt, - variables, - ref_level, - treatment_levels, - conf_level, - pval_sided = "2", - include_variance = TRUE, - include_pval = TRUE, - formats = list( - lsmean = jjcsformat_xx("xx.x"), - mse = jjcsformat_xx("xx.x"), - df = jjcsformat_xx("xx."), - lsmean_diff = jjcsformat_xx("xx.x"), - se = jjcsformat_xx("xx.xx"), - ci = jjcsformat_xx("(xx.xx, xx.xx)"), - pval = jjcsformat_pval_fct(0) - )) { + lyt, + variables, + ref_level, + treatment_levels, + conf_level, + pval_sided = "2", + include_variance = TRUE, + include_pval = TRUE, + formats = list( + lsmean = jjcsformat_xx("xx.x"), + mse = jjcsformat_xx("xx.x"), + df = jjcsformat_xx("xx."), + lsmean_diff = jjcsformat_xx("xx.x"), + se = jjcsformat_xx("xx.xx"), + ci = jjcsformat_xx("(xx.xx, xx.xx)"), + pval = jjcsformat_pval_fct(0) + ) +) { # Check that all required format elements are present in the formats parameter checkmate::assert_names( names(formats), diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 2f7f545e..076bffab 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -7,10 +7,11 @@ #' @return 
`tt` represented as a `tbl` data.frame suitable for passing #' to [tidytlg::gentlg] via the `huxme` argument. tt_to_tbldf <- function( - tt, - fontspec = font_spec("Times", 9L, 1), - string_map = default_str_map, - markup_df = dps_markup_df) { + tt, + fontspec = font_spec("Times", 9L, 1), + string_map = default_str_map, + markup_df = dps_markup_df +) { if (!validate_table_struct(tt)) { stop( "invalid table structure. summarize_row_groups without ", @@ -108,9 +109,10 @@ tlg_type <- function(tt) { } mpf_to_colspan <- function( - mpf, - string_map = default_str_map, - markup_df = dps_markup_df) { + mpf, + string_map = default_str_map, + markup_df = dps_markup_df +) { if (!methods::is(mpf, "MatrixPrintForm")) { stop("figure out how to make this an mpf (MatrixPrintForm) first.") } @@ -271,38 +273,39 @@ get_ncol <- function(tt) { #' @return If `file` is non-NULL, this is called for the side-effect of writing #' one or more RTF files. Otherwise, returns a list of `huxtable` objects. tt_to_tlgrtf <- function( + tt, + file = NULL, + orientation = c("portrait", "landscape"), + colwidths = def_colwidths( tt, - file = NULL, - orientation = c("portrait", "landscape"), - colwidths = def_colwidths( - tt, - fontspec, - col_gap = col_gap, - label_width_ins = label_width_ins, - type = tlgtype - ), - label_width_ins = 2, - watermark = NULL, - pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), - fontspec = font_spec("Times", 9L, 1.2), - pg_width = pg_width_by_orient(orientation == "landscape"), - margins = c(0, 0, 0, 0), - paginate = tlg_type(tt) == "Table", - col_gap = ifelse(tlgtype == "Listing", .5, 3), - nosplitin = list( - row = character(), - col = character() - ), - verbose = FALSE, - tlgtype = tlg_type(tt), - string_map = default_str_map, - markup_df = dps_markup_df, - combined_rtf = FALSE, - one_table = TRUE, - border_mat = make_header_bordmat(obj = tt), - round_type = obj_round_type(tt), - validate = TRUE, - ...) 
{ + fontspec, + col_gap = col_gap, + label_width_ins = label_width_ins, + type = tlgtype + ), + label_width_ins = 2, + watermark = NULL, + pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), + fontspec = font_spec("Times", 9L, 1.2), + pg_width = pg_width_by_orient(orientation == "landscape"), + margins = c(0, 0, 0, 0), + paginate = tlg_type(tt) == "Table", + col_gap = ifelse(tlgtype == "Listing", .5, 3), + nosplitin = list( + row = character(), + col = character() + ), + verbose = FALSE, + tlgtype = tlg_type(tt), + string_map = default_str_map, + markup_df = dps_markup_df, + combined_rtf = FALSE, + one_table = TRUE, + border_mat = make_header_bordmat(obj = tt), + round_type = obj_round_type(tt), + validate = TRUE, + ... +) { # Validate table structure if requested and not disabled by environment variable # nolint start if (validate && tlgtype == "Table" && methods::is(tt, "VTableTree") && @@ -705,8 +708,9 @@ fixup_bord_mat <- function(brdmat, hstrs) { } .make_header_bordmat <- function( - obj, - mpf = matrix_form(utils::head(obj, 1), expand_newlines = FALSE)) { + obj, + mpf = matrix_form(utils::head(obj, 1), expand_newlines = FALSE) +) { spns <- mf_spans(mpf) nlh <- mf_nlheader(mpf) nrh <- mf_nrheader(mpf) From 213c7cede9b134f28bcc938c57629832737dd4fd Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 12:57:15 +0000 Subject: [PATCH 20/49] [skip roxygen] [skip vbump] Roxygen Man Pages Auto Update --- man/rbmi_analyse.Rd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/man/rbmi_analyse.Rd b/man/rbmi_analyse.Rd index ec8c6b91..5f3d8613 100644 --- a/man/rbmi_analyse.Rd +++ b/man/rbmi_analyse.Rd @@ -61,7 +61,7 @@ via \code{...}. 
\code{fun} must return a named list with each element itself being a list containing a single numeric element called \code{est} (or additionally \code{se} and \code{df} if -you had originally specified \code{\link[rbmi:method]{rbmi::method_bayes()}} or \code{\link[rbmi:method]{rbmi::method_approxbayes()}}) +you had originally specified \code{\link[rbmi:method_bayes]{rbmi::method_bayes()}} or \code{\link[rbmi:method_approxbayes]{rbmi::method_approxbayes()}}) i.e.: \preformatted{ myfun <- function(dat, ...) { From dded592c7756ced323218cabe21e2e10a94e19d2 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 13:09:38 +0000 Subject: [PATCH 21/49] lintr --- R/split_functions.R | 17 ++++++++--------- tests/testthat/test-jjcsformats.R | 4 ++-- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/R/split_functions.R b/R/split_functions.R index 8918538f..a47dd450 100644 --- a/R/split_functions.R +++ b/R/split_functions.R @@ -233,15 +233,14 @@ resolve_ancestor_pos <- function(anc_pos, numrows) { #' #' stopifnot(identical(cell_values(tbl2), cell_values(tbl3))) cond_rm_facets <- function( - facets = NULL, - facets_regex = NULL, - ancestor_pos = 1, - split = NULL, - split_regex = NULL, - value = NULL, - value_regex = NULL, - keep_matches = FALSE -) { + facets = NULL, + facets_regex = NULL, + ancestor_pos = 1, + split = NULL, + split_regex = NULL, + value = NULL, + value_regex = NULL, + keep_matches = FALSE) { ## detect errors/miscalling which don't even require us to have the facets if (is.null(split) && is.null(split_regex) && is.null(value) && is.null(value_regex)) { stop( diff --git a/tests/testthat/test-jjcsformats.R b/tests/testthat/test-jjcsformats.R index fdd95cb8..d9a5057b 100644 --- a/tests/testthat/test-jjcsformats.R +++ b/tests/testthat/test-jjcsformats.R @@ -195,8 +195,8 @@ test_that("some special cases for jjcsformat_pval_fct", { "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." 
) expect_error( - format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE")? - "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." + format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), + "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." ) }) From c07351db3eec0a31b1c467e09f294bd4399e60a8 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:12:17 +0000 Subject: [PATCH 22/49] [skip style] [skip vbump] Restyle files --- R/split_functions.R | 17 +++++++++-------- tests/testthat/test-jjcsformats.R | 2 +- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/R/split_functions.R b/R/split_functions.R index a47dd450..8918538f 100644 --- a/R/split_functions.R +++ b/R/split_functions.R @@ -233,14 +233,15 @@ resolve_ancestor_pos <- function(anc_pos, numrows) { #' #' stopifnot(identical(cell_values(tbl2), cell_values(tbl3))) cond_rm_facets <- function( - facets = NULL, - facets_regex = NULL, - ancestor_pos = 1, - split = NULL, - split_regex = NULL, - value = NULL, - value_regex = NULL, - keep_matches = FALSE) { + facets = NULL, + facets_regex = NULL, + ancestor_pos = 1, + split = NULL, + split_regex = NULL, + value = NULL, + value_regex = NULL, + keep_matches = FALSE +) { ## detect errors/miscalling which don't even require us to have the facets if (is.null(split) && is.null(split_regex) && is.null(value) && is.null(value_regex)) { stop( diff --git a/tests/testthat/test-jjcsformats.R b/tests/testthat/test-jjcsformats.R index d9a5057b..c5c0b394 100644 --- a/tests/testthat/test-jjcsformats.R +++ b/tests/testthat/test-jjcsformats.R @@ -196,7 +196,7 @@ test_that("some special cases for jjcsformat_pval_fct", { ) expect_error( format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), - "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." 
+ "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." ) }) From c8dba05255214a8da13ec49cf3ad8f4c4454df58 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 13:14:25 +0000 Subject: [PATCH 23/49] address spelling --- R/tt_to_tblfile.R | 2 +- inst/WORDLIST | 3 +++ man/rbmi_analyse.Rd | 2 +- man/tt_to_tlgrtf.Rd | 2 +- 4 files changed, 6 insertions(+), 3 deletions(-) diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 076bffab..482fa3a8 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -252,7 +252,7 @@ get_ncol <- function(tt) { #' for what the matrix should contain. Users should only specify this when the #' default behavior does not meet their needs. #' @param round_type (`"iec"` or `"sas"`)\cr the type of rounding to perform. iec, -#' the default, peforms rounding compliant with IEC 60559, while +#' the default, performs rounding compliant with IEC 60559, while #' sas performs nearest-value rounding consistent with rounding within SAS. #' See `[formatters::format_value()]` for more details. #' @param validate logical(1). Whether to validate the table structure using diff --git a/inst/WORDLIST b/inst/WORDLIST index 40328563..4a0e8bee 100644 --- a/inst/WORDLIST +++ b/inst/WORDLIST @@ -78,6 +78,8 @@ grp Haenszel https ie +IEC +iec imputeObj inheritParams inriskdiffcol @@ -159,6 +161,7 @@ rtables rtf RTF rtfs +sas savse scorefun sd diff --git a/man/rbmi_analyse.Rd b/man/rbmi_analyse.Rd index 5f3d8613..ec8c6b91 100644 --- a/man/rbmi_analyse.Rd +++ b/man/rbmi_analyse.Rd @@ -61,7 +61,7 @@ via \code{...}. 
\code{fun} must return a named list with each element itself being a list containing a single numeric element called \code{est} (or additionally \code{se} and \code{df} if -you had originally specified \code{\link[rbmi:method_bayes]{rbmi::method_bayes()}} or \code{\link[rbmi:method_approxbayes]{rbmi::method_approxbayes()}}) +you had originally specified \code{\link[rbmi:method]{rbmi::method_bayes()}} or \code{\link[rbmi:method]{rbmi::method_approxbayes()}}) i.e.: \preformatted{ myfun <- function(dat, ...) { diff --git a/man/tt_to_tlgrtf.Rd b/man/tt_to_tlgrtf.Rd index ef27835c..aa0e9124 100644 --- a/man/tt_to_tlgrtf.Rd +++ b/man/tt_to_tlgrtf.Rd @@ -85,7 +85,7 @@ for what the matrix should contain. Users should only specify this when the default behavior does not meet their needs.} \item{round_type}{(\code{"iec"} or \code{"sas"})\cr the type of rounding to perform. iec, -the default, peforms rounding compliant with IEC 60559, while +the default, performs rounding compliant with IEC 60559, while sas performs nearest-value rounding consistent with rounding within SAS. See \verb{[formatters::format_value()]} for more details.} From 0b08970ba207930a22c57efdcd9b11651461fc7d Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 13:19:20 +0000 Subject: [PATCH 24/49] [skip roxygen] [skip vbump] Roxygen Man Pages Auto Update --- man/rbmi_analyse.Rd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/man/rbmi_analyse.Rd b/man/rbmi_analyse.Rd index ec8c6b91..5f3d8613 100644 --- a/man/rbmi_analyse.Rd +++ b/man/rbmi_analyse.Rd @@ -61,7 +61,7 @@ via \code{...}. 
\code{fun} must return a named list with each element itself being a list containing a single numeric element called \code{est} (or additionally \code{se} and \code{df} if -you had originally specified \code{\link[rbmi:method]{rbmi::method_bayes()}} or \code{\link[rbmi:method]{rbmi::method_approxbayes()}}) +you had originally specified \code{\link[rbmi:method_bayes]{rbmi::method_bayes()}} or \code{\link[rbmi:method_approxbayes]{rbmi::method_approxbayes()}}) i.e.: \preformatted{ myfun <- function(dat, ...) { From 933137fd98c25b050ab1b830c5194eb15d31550d Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 13:53:06 +0000 Subject: [PATCH 25/49] remove 1 test, covered above --- tests/testthat/test-jjcsformats.R | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/testthat/test-jjcsformats.R b/tests/testthat/test-jjcsformats.R index c5c0b394..be49dab0 100644 --- a/tests/testthat/test-jjcsformats.R +++ b/tests/testthat/test-jjcsformats.R @@ -194,10 +194,6 @@ test_that("some special cases for jjcsformat_pval_fct", { format_value(0.00000123, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." ) - expect_error( - format_value(NA_real_, format = jjcsformat_pval_fct(0.0005), na_str = "NE"), - "jjcsformat_pval_fct: argument alpha should be 0 or at least 0.001." 
- ) }) test_that("jjcsformat_xx works also for empty cells", { From 6f0587efaf7b29e2b871e8567c05e62f18765cb2 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 14:13:22 +0000 Subject: [PATCH 26/49] update ref to formatting functions --- _pkgdown.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/_pkgdown.yml b/_pkgdown.yml index 654cb48a..2f3aae40 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -47,7 +47,6 @@ reference: - c_proportion_logical - cmp_cfun - cmp_post_fun - - count_fraction - find_missing_chg_after_avisit - fit_ancova - fit_mmrm_j @@ -90,10 +89,6 @@ reference: - jjcsformat_xx - jjcs_num_formats - jjcsformat_count_denom_fraction - - format_xx_fct - - jjcsformat_fraction_count_denom - - jjcsformat_pval_fct - - jjcsformat_range_fct - insert_blank_line - title: junco Sorting Functions and Pruning Functions From f553161619ce6887ca91a68db0f780261271b360 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 15:50:55 +0000 Subject: [PATCH 27/49] roxygen update + test update --- R/jjcsformats.R | 18 +++++++++++++++--- man/count_fraction.Rd | 11 +++++++++++ man/jjcsformat_xx.Rd | 2 ++ man/rbmi_analyse.Rd | 2 +- tests/testthat/_snaps/jjcsformats.md | 12 ++++++++++++ tests/testthat/test-jjcsformats.R | 3 +++ 6 files changed, 44 insertions(+), 4 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 8f393caf..e25c162d 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -11,6 +11,7 @@ #' the formatters framework be overridden by `na_str_default`? Defaults to #' `TRUE`, as a way to have a different default na string behavior from the #' base `formatters` framework. +#' @param na_str String for NA values. 
#' @return Either a supported format string, or a formatting function that can be #' used as format in `formatters::format_value` #' @family JJCS formatting functions @@ -63,13 +64,13 @@ jjcsformat_xx <- function( function(x, na_str, round_type) { - if (fmt %in% formatters:::formats_1d) { + if (fmt %in% list_valid_format_labels()$`1d`) { res <- format_value(x, fmt, na_str = na_str[1], round_type = round_type ) - } else if (fmt %in% c("xx.xxxxx", "xx.xxxxxx", "xx.xxxxxxx")) { + } else if (fmt %in% paste0("xx.", strrep("x", times = 5:12))) { # p-value fmt sometimes might need more digits d <- nchar(sub(".*\\.", "", fmt)) res <- round_fmt(x, digits = d, round_type = round_type, na_str = na_str[1]) @@ -126,6 +127,13 @@ jjcsformat_xx <- function( #' @param x (`numeric vector`)\cr Vector with elements `num` and `fraction` or `num`, `denom` and `fraction`. #' @param d (`numeric(1)`)\cr Number of digits to round fraction to (default = 1) #' @param ... Additional arguments passed to other methods. +#' @param type (`character(1`)\cr One of `count_fraction`, `count_denom_fraction`, `fraction_count_denom`, +#' to specify the type of format the function will represent. +#' @param verbose (`logical`)\cr Whether to print verbose output +#' @param round_type (`character(1)`)\cr the type of rounding to perform. +#' See [formatters::format_value()] for more details. +#' @param output (`string`)\cr output type. +#' See [formatters::format_value()] for more details. #' @return A formatting function to format input into string in the format `count / denom (ratio percent)`. If `count` #' is 0, the format is `0`. 
If fraction is >0.99, the format is #' `count / denom (>99.9 percent)` @@ -292,12 +300,16 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { ">0.999" } else { res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) - while (as.numeric(res) == alpha && x < alpha) { + while (as.numeric(res) == alpha && x < alpha && + xx_format != paste0("xx.", strrep("x", times = 10))) { # Increase precision by 1 digit until the result # is different from threshold alpha. xx_format <- paste0(xx_format, "x") res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) } + if (xx_format == paste0("xx.", strrep("x", times = 10))){ + # produce message eg "stopped increasing precision for p-value"? + } res } } diff --git a/man/count_fraction.Rd b/man/count_fraction.Rd index 75981369..a79d7e73 100644 --- a/man/count_fraction.Rd +++ b/man/count_fraction.Rd @@ -23,8 +23,19 @@ jjcsformat_fraction_count_denom(x, round_type = c("sas", "iec"), output, ...) \arguments{ \item{d}{(\code{numeric(1)})\cr Number of digits to round fraction to (default = 1)} +\item{type}{(\verb{character(1})\cr One of \code{count_fraction}, \code{count_denom_fraction}, \code{fraction_count_denom}, +to specify the type of format the function will represent.} + +\item{verbose}{(\code{logical})\cr Whether to print verbose output} + \item{x}{(\verb{numeric vector})\cr Vector with elements \code{num} and \code{fraction} or \code{num}, \code{denom} and \code{fraction}.} +\item{round_type}{(\code{character(1)})\cr the type of rounding to perform. +See \code{\link[formatters:format_value]{formatters::format_value()}} for more details.} + +\item{output}{(\code{string})\cr output type. 
+See \code{\link[formatters:format_value]{formatters::format_value()}} for more details.} + \item{...}{Additional arguments passed to other methods.} } \value{ diff --git a/man/jjcsformat_xx.Rd b/man/jjcsformat_xx.Rd index 70e60700..ec0062fc 100644 --- a/man/jjcsformat_xx.Rd +++ b/man/jjcsformat_xx.Rd @@ -21,6 +21,8 @@ jjcsformat_range_fct(str) \item{str}{(\code{string})\cr the format specifying the number of digits to be used, for the range values, e.g. \code{"xx.xx"}.} +\item{na_str}{String for NA values.} + \item{na_str_dflt}{Character to represent NA value} \item{replace_na_dflt}{logical(1). Should an \code{na_string} of "NA" within diff --git a/man/rbmi_analyse.Rd b/man/rbmi_analyse.Rd index 5f3d8613..ec8c6b91 100644 --- a/man/rbmi_analyse.Rd +++ b/man/rbmi_analyse.Rd @@ -61,7 +61,7 @@ via \code{...}. \code{fun} must return a named list with each element itself being a list containing a single numeric element called \code{est} (or additionally \code{se} and \code{df} if -you had originally specified \code{\link[rbmi:method_bayes]{rbmi::method_bayes()}} or \code{\link[rbmi:method_approxbayes]{rbmi::method_approxbayes()}}) +you had originally specified \code{\link[rbmi:method]{rbmi::method_bayes()}} or \code{\link[rbmi:method]{rbmi::method_approxbayes()}}) i.e.: \preformatted{ myfun <- function(dat, ...) 
{ diff --git a/tests/testthat/_snaps/jjcsformats.md b/tests/testthat/_snaps/jjcsformats.md index 6a4acf60..72f3200b 100644 --- a/tests/testthat/_snaps/jjcsformats.md +++ b/tests/testthat/_snaps/jjcsformats.md @@ -258,4 +258,16 @@ jjcsformat_pval_fct(5e-04)(NA_real_, na_str = "ne") Output [1] "ne" + Code + jjcsformat_pval_fct(0.005)(0.004999999) + Output + [1] "0.004999999" + Code + jjcsformat_pval_fct(0.005)(0.0049999999) + Output + [1] "0.0049999999" + Code + jjcsformat_pval_fct(0.005)(0.00499999999) + Output + [1] "0.0050000000" diff --git a/tests/testthat/test-jjcsformats.R b/tests/testthat/test-jjcsformats.R index be49dab0..f86636d7 100644 --- a/tests/testthat/test-jjcsformats.R +++ b/tests/testthat/test-jjcsformats.R @@ -178,6 +178,9 @@ test_that("jjcsformat_pval_fct works", { jjcsformat_pval_fct(0)(0.9990000001) jjcsformat_pval_fct(0)(NA_real_, na_str = "ne") jjcsformat_pval_fct(0.0005)(NA_real_, na_str = "ne") + jjcsformat_pval_fct(0.005)(0.004999999) + jjcsformat_pval_fct(0.005)(0.0049999999) + jjcsformat_pval_fct(0.005)(0.00499999999) }) }) From 936663d126541166b4b0ce4ac9eee338abbdebc5 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 15:53:37 +0000 Subject: [PATCH 28/49] [skip style] [skip vbump] Restyle files --- R/jjcsformats.R | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index e25c162d..fbbf7fa6 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -11,7 +11,7 @@ #' the formatters framework be overridden by `na_str_default`? Defaults to #' `TRUE`, as a way to have a different default na string behavior from the #' base `formatters` framework. -#' @param na_str String for NA values. +#' @param na_str String for NA values. 
#' @return Either a supported format string, or a formatting function that can be #' used as format in `formatters::format_value` #' @family JJCS formatting functions @@ -127,13 +127,13 @@ jjcsformat_xx <- function( #' @param x (`numeric vector`)\cr Vector with elements `num` and `fraction` or `num`, `denom` and `fraction`. #' @param d (`numeric(1)`)\cr Number of digits to round fraction to (default = 1) #' @param ... Additional arguments passed to other methods. -#' @param type (`character(1`)\cr One of `count_fraction`, `count_denom_fraction`, `fraction_count_denom`, +#' @param type (`character(1`)\cr One of `count_fraction`, `count_denom_fraction`, `fraction_count_denom`, #' to specify the type of format the function will represent. #' @param verbose (`logical`)\cr Whether to print verbose output -#' @param round_type (`character(1)`)\cr the type of rounding to perform. +#' @param round_type (`character(1)`)\cr the type of rounding to perform. #' See [formatters::format_value()] for more details. #' @param output (`string`)\cr output type. -#' See [formatters::format_value()] for more details. +#' See [formatters::format_value()] for more details. #' @return A formatting function to format input into string in the format `count / denom (ratio percent)`. If `count` #' is 0, the format is `0`. If fraction is >0.99, the format is #' `count / denom (>99.9 percent)` @@ -300,14 +300,14 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { ">0.999" } else { res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) - while (as.numeric(res) == alpha && x < alpha && - xx_format != paste0("xx.", strrep("x", times = 10))) { + while (as.numeric(res) == alpha && x < alpha && + xx_format != paste0("xx.", strrep("x", times = 10))) { # Increase precision by 1 digit until the result # is different from threshold alpha. 
xx_format <- paste0(xx_format, "x") res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) } - if (xx_format == paste0("xx.", strrep("x", times = 10))){ + if (xx_format == paste0("xx.", strrep("x", times = 10))) { # produce message eg "stopped increasing precision for p-value"? } res From f6f8e80fe82184ebfba26c4b1a80d38ca216bd24 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 15:55:31 +0000 Subject: [PATCH 29/49] [skip roxygen] [skip vbump] Roxygen Man Pages Auto Update --- man/rbmi_analyse.Rd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/man/rbmi_analyse.Rd b/man/rbmi_analyse.Rd index ec8c6b91..5f3d8613 100644 --- a/man/rbmi_analyse.Rd +++ b/man/rbmi_analyse.Rd @@ -61,7 +61,7 @@ via \code{...}. \code{fun} must return a named list with each element itself being a list containing a single numeric element called \code{est} (or additionally \code{se} and \code{df} if -you had originally specified \code{\link[rbmi:method]{rbmi::method_bayes()}} or \code{\link[rbmi:method]{rbmi::method_approxbayes()}}) +you had originally specified \code{\link[rbmi:method_bayes]{rbmi::method_bayes()}} or \code{\link[rbmi:method_approxbayes]{rbmi::method_approxbayes()}}) i.e.: \preformatted{ myfun <- function(dat, ...) 
{ From f65e676986c163ea630195d388f56c3e57b5fa83 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Tue, 2 Dec 2025 17:39:56 +0000 Subject: [PATCH 30/49] fix styler --- R/jjcsformats.R | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index fbbf7fa6..4ac57d9e 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -38,11 +38,10 @@ #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) #' if (is.function(fmt2)) fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) jjcsformat_xx <- function( - str, - na_str = na_str_dflt, - na_str_dflt = "NE", - replace_na_dflt = TRUE -) { + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE) { if (grepl("xxx.", str, fixed = TRUE)) { stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") } From 8165e59bb94c3d32cb930fa4facebfe952436960 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 2 Dec 2025 17:42:37 +0000 Subject: [PATCH 31/49] [skip style] [skip vbump] Restyle files --- R/jjcsformats.R | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 4ac57d9e..fbbf7fa6 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -38,10 +38,11 @@ #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) #' if (is.function(fmt2)) fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) jjcsformat_xx <- function( - str, - na_str = na_str_dflt, - na_str_dflt = "NE", - replace_na_dflt = TRUE) { + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE +) { if (grepl("xxx.", str, fixed = TRUE)) { stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. 
instead.") } From ae985c74aa1018c13bc63b2daad545795ecba695 Mon Sep 17 00:00:00 2001 From: iaugusty Date: Thu, 4 Dec 2025 12:56:31 +0000 Subject: [PATCH 32/49] set valid_round_type as defaults for round_type --- R/column_stats.R | 2 +- R/jjcsformats.R | 13 ++++++------- man/count_fraction.Rd | 6 +++--- .../junco_utils_default_stats_formats_labels.md | 2 +- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/R/column_stats.R b/R/column_stats.R index a2dbb9a7..c7ae9e9d 100644 --- a/R/column_stats.R +++ b/R/column_stats.R @@ -1,4 +1,4 @@ -calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = c("sas", "iec"), exclude_visits, +calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = valid_round_type, exclude_visits, var_names = c("AVAL", "CHG", "BASE")) { round_type <- match.arg(round_type) if (is.na(decimal)) { diff --git a/R/jjcsformats.R b/R/jjcsformats.R index fbbf7fa6..9bc87351 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -38,11 +38,10 @@ #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) #' if (is.function(fmt2)) fmt2(value, round_type = "iec", na_str = c("ne1", "ne2")) jjcsformat_xx <- function( - str, - na_str = na_str_dflt, - na_str_dflt = "NE", - replace_na_dflt = TRUE -) { + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE) { if (grepl("xxx.", str, fixed = TRUE)) { stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") } @@ -86,7 +85,7 @@ jjcsformat_xx <- function( rtable_format <- function(x, output, - round_type = c("sas", "iec"), + round_type = valid_round_type, na_str = na_str_dflt) { if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) @@ -147,7 +146,7 @@ jjcsformat_cnt_den_fract_fct <- function(d = 1, type <- match.arg(type) function(x, - round_type = c("sas", "iec"), + round_type = valid_round_type, output, ...) 
{ obj_label(x) <- NULL diff --git a/man/count_fraction.Rd b/man/count_fraction.Rd index a79d7e73..a41deb8b 100644 --- a/man/count_fraction.Rd +++ b/man/count_fraction.Rd @@ -14,11 +14,11 @@ jjcsformat_cnt_den_fract_fct( verbose = FALSE ) -jjcsformat_count_fraction(x, round_type = c("sas", "iec"), output, ...) +jjcsformat_count_fraction(x, round_type = valid_round_type, output, ...) -jjcsformat_count_denom_fraction(x, round_type = c("sas", "iec"), output, ...) +jjcsformat_count_denom_fraction(x, round_type = valid_round_type, output, ...) -jjcsformat_fraction_count_denom(x, round_type = c("sas", "iec"), output, ...) +jjcsformat_fraction_count_denom(x, round_type = valid_round_type, output, ...) } \arguments{ \item{d}{(\code{numeric(1)})\cr Number of digits to round fraction to (default = 1)} diff --git a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md index 1b669701..965c495e 100644 --- a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md +++ b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md @@ -14,7 +14,7 @@ $quantiles_upper function(x, output, - round_type = c("sas", "iec"), + round_type = valid_round_type, na_str = na_str_dflt) { if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) From 3c527a0c7761965470656adaefcaddb6675f0855 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 4 Dec 2025 12:59:14 +0000 Subject: [PATCH 33/49] [skip style] [skip vbump] Restyle files --- R/jjcsformats.R | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 9bc87351..e5c822dd 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -38,10 +38,11 @@ #' format_value(value, fmt2, round_type = "iec", na_str = c("ne1", "ne2")) #' if (is.function(fmt2)) fmt2(value, round_type 
= "iec", na_str = c("ne1", "ne2")) jjcsformat_xx <- function( - str, - na_str = na_str_dflt, - na_str_dflt = "NE", - replace_na_dflt = TRUE) { + str, + na_str = na_str_dflt, + na_str_dflt = "NE", + replace_na_dflt = TRUE +) { if (grepl("xxx.", str, fixed = TRUE)) { stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") } From 2c6cc0208b2492eee9c6a827ddbfda23929b2fe8 Mon Sep 17 00:00:00 2001 From: munoztd0 Date: Thu, 4 Dec 2025 14:02:50 +0100 Subject: [PATCH 34/49] fix: codepencies --- DESCRIPTION | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/DESCRIPTION b/DESCRIPTION index e5ce0649..89a814b6 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -72,3 +72,7 @@ Suggests: pharmaverseadamjnj VignetteBuilder: knitr Config/testthat/edition: 3 +Remotes: + insightsengineering/formatters@1040_rtables_round_type, + insightsengineering/rtables@1040_rtables_round_type, + insightsengineering/rlistings@1040_rtables_round_type From 4ad1f1396a325fd9d5c2c29710b31ddef9b9cd79 Mon Sep 17 00:00:00 2001 From: munoztd0 Date: Thu, 4 Dec 2025 14:05:41 +0100 Subject: [PATCH 35/49] fix: lint --- R/jjcsformats.R | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index e5c822dd..7909258b 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -299,12 +299,12 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { } else if (x > 0.999) { ">0.999" } else { - res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) + res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) # nolint start while (as.numeric(res) == alpha && x < alpha && xx_format != paste0("xx.", strrep("x", times = 10))) { # Increase precision by 1 digit until the result # is different from threshold alpha. 
- xx_format <- paste0(xx_format, "x") + xx_format <- paste0(xx_format, "x") # nolint end res <- format_value(x, jjcsformat_xx(xx_format), round_type = round_type) } if (xx_format == paste0("xx.", strrep("x", times = 10))) { From 91a511b1fd1e87e5f0338fc21b3da749177a8cdf Mon Sep 17 00:00:00 2001 From: munoztd0 Date: Thu, 4 Dec 2025 14:12:19 +0100 Subject: [PATCH 36/49] fix: .rtf .Rbuildignore --- .Rbuildignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.Rbuildignore b/.Rbuildignore index c4ba50d6..dc6f13a7 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -47,6 +47,5 @@ pkgdown ^output$ ^junco_tlg_template_scripts$ ^[^/]*\.R$ -^.*\.rtf$ ^CHANGELOG\.md$ ^air\.toml$ \ No newline at end of file From fe9f7c359518f0b7b54afe7e9bcc8516d0a183a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Mu=C3=B1oz=20Tord?= Date: Thu, 11 Dec 2025 15:18:15 +0100 Subject: [PATCH 37/49] 116 tt to tbldf function behavior with invalid structures (#124) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * a_maxlev fun update: `denom_df` argument replaced by `.alt_df_full`. (#114) * replace denom_df arg with .alt_df_full filled by the rtables machinery * changelog update --------- Co-authored-by: David Muñoz Tord * Added `a_two_tier()` analysis function (#122) * first commit. just new add. * new commit - two_tier_afun. * added a_two_tier + unit tests * new unit tests + CHANGELOG update * _pkgdown.yml update * spellcheck update * update: add _pkgdown.yml a_two_tier --------- Co-authored-by: munoztd0 * address bug #120: a_freq_j() - risk diff. 
Suspected results in the presence of upper row-split and no alt_counts_df (#123) * resolve bug and add test * [skip style] [skip vbump] Restyle files * Trigger CI: empty commit --------- Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> * Release rbmi suggests (#112) * Main ci cd (#53) * Update README.md withnew links * Update inspect.yaml to check * Update pkgdown.yaml to Docs * Update README.md * Add hexagon sticker (#38) * Generate pkgdown site favicon * add logo to inspect unit reports --------- Co-authored-by: RMao6 * Issue and PR templates (#60) * issue and PR templates * Update pkgdown.yaml stop running pkgdown for draft PRs * Submitted to cran 0.1.1 (#75) * update version (finally x.x) * merge main (#72) * Update NEWS.md * fix: spellcheck() * ci.cd: add coverage report comment to PR * test: add coverage ignore * Moving out from old CI/CD to new one (#51) * moving out from old CI/CD to new one * Generate tern_utils_default_stats_formats_labels.md * Main ci cd (#53) * Update README.md withnew links * Update inspect.yaml to check * Update pkgdown.yaml to Docs * Update README.md * Add hexagon sticker (#38) * Generate pkgdown site favicon * add logo to inspect unit reports --------- Co-authored-by: RMao6 * Issue and PR templates (#60) * issue and PR templates * Update pkgdown.yaml stop running pkgdown for draft PRs --------- Co-authored-by: Gabe Becker Co-authored-by: RMao6 * Revert "merge main (#72)" (#74) This reverts commit 7e8c4af169b2b21253cfb7c6a6a69a5107a1d4a1. 
* Update NEWS.md * [skip roxygen] [skip vbump] Roxygen Man Pages Auto Update --------- Co-authored-by: Gabe Becker Co-authored-by: RMao6 Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> * update: move rbmi to suggest * update: prepare to CRAN release * update: add missing snapshot stats formats * update: rbmi related snapshots * update: rbmi example conditionned * update: remove rbmi rd refs * update: prepare release * update: R CMD check results * update: added suppressPackageStartupMessages(library(rbmi)) * update: remove comment in example * update: add helper function so we don't need to repeat assert rbmi everywhere * update: add helper function so we don't need to repeat assert rbmi everywhere * update: add rhub check * update: update get_formats_from_stats * update: add rbmi related snaps * update: remove .rtf from .Rbuilignore * [skip style] [skip vbump] Restyle files * update: add insight repos * update: fix linting * update: new R CMD Check * update: new R CMD Check * update: new R CMD Check * [skip style] [skip vbump] Restyle files * update: new R CMD Check install rbmi * update: new R CMD Check install rbmi * update: new R CMD Check install rbmi * update: fix linting * update: fix example * get full coverage of rbmi_pool() by adding tests from rbmi * [skip style] [skip vbump] Restyle files * test rbmi_analyse * [skip style] [skip vbump] Restyle files * add ignores around Air formatter * add tidyr to suggests * update: remove .rprofile * fix: rbmi in rd docs * fix: missing comma * update: news.md * [skip style] [skip vbump] Restyle files * update: news.md * update: WORDLIST * update: do_exclude_split * fix: merge issues with .rd links * fix: WORDLIST --------- Co-authored-by: RMao6 Co-authored-by: Gabe Becker Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Daniel Sabanes Bove * update: remotes * update: Consistent `tt_to_tbldf()` function behavior with invalid 
structures (#116) * fix: tests * fix: rd docs * [skip style] [skip vbump] Restyle files * fix: lintr * fix: lintr * [skip style] [skip vbump] Restyle files * update: added WWojciak as contributores * update: remove temp_ilse * update: remove duplicate * update: remove pharmaverseadam * update: keep pharmaverseadam because in news.md * update: merge issues * update: fix warnings --------- Co-authored-by: Wojtek <11532997+wwojciech@users.noreply.github.com> Co-authored-by: Ilse <55379552+iaugusty@users.noreply.github.com> Co-authored-by: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: RMao6 Co-authored-by: Gabe Becker Co-authored-by: Daniel Sabanes Bove Co-authored-by: Ezequiel Anokian --- .Rbuildignore | 2 + .github/ISSUE_TEMPLATE/bug.yml | 31 +++ .github/ISSUE_TEMPLATE/cran-release.yml | 122 +++++++++ .github/ISSUE_TEMPLATE/feature.yml | 13 + .github/ISSUE_TEMPLATE/release.yml | 121 +++++++++ .github/pull_request_template.md | 10 + .github/workflows/R-CMD-check.yaml | 59 +++++ .github/workflows/inspect.yaml | 128 +++++----- .github/workflows/pkgdown.yaml | 13 +- .github/workflows/rhub.yaml | 95 +++++++ .gitignore | 5 +- CHANGELOG.md | 12 +- DESCRIPTION | 16 +- NAMESPACE | 2 + NEWS.md | 18 +- R/a_maxlev.R | 25 +- R/a_two_tier.R | 235 ++++++++++++++++++ R/ancova_rbmi.R | 53 ++-- R/h_freq_funs.R | 7 + R/mmrm_rbmi.R | 24 +- R/pool_rbmi.R | 19 +- R/rbmi.R | 199 +++++++++------ R/sorting_functions.R | 16 +- R/tt_to_tblfile.R | 50 ++-- R/utils.R | 15 ++ _pkgdown.yml | 1 + cran-comments.md | 45 ++++ inst/WORDLIST | 11 +- man/a_maxlev.Rd | 18 +- man/a_two_tier.Rd | 197 +++++++++++++++ man/assert_rbmi.Rd | 15 ++ man/complex_scoring_function.Rd | 5 +- man/rbmi_analyse.Rd | 136 +++++----- man/rbmi_ancova.Rd | 16 +- man/rbmi_ancova_single.Rd | 19 +- man/rbmi_mmrm.Rd | 10 +- man/rbmi_pool.Rd | 8 +- man/tt_to_tbldf.Rd | 7 +- man/tt_to_tlgrtf.Rd | 4 +- tests/testthat/_snaps/a_freq_j.md | 22 ++ ...unco_utils_default_stats_formats_labels.md | 
58 +---- tests/testthat/helper_rbmi.R | 75 ++++++ tests/testthat/test-a_freq_j.R | 44 ++++ tests/testthat/test-a_freq_resp_var_j.R | 28 +-- tests/testthat/test-a_maxlev.R | 30 +-- .../testthat/test-a_summarize_aval_chg_diff.R | 40 +-- tests/testthat/test-a_two_tier.R | 232 +++++++++++++++++ tests/testthat/test-analyze_values.R | 2 +- tests/testthat/test-ancova_rbmi.R | 6 + tests/testthat/test-cmp_functions.R | 2 +- tests/testthat/test-colwidths.R | 8 +- tests/testthat/test-count_pct.R | 148 +++++------ tests/testthat/test-count_pct_relrisk.R | 46 ++-- tests/testthat/test-coxph_hr.R | 28 +-- tests/testthat/test-coxreg_multivar.R | 4 +- tests/testthat/test-event_free.R | 20 +- ...junco_utils_default_stats_formats_labels.R | 13 +- tests/testthat/test-kaplan_meier.R | 38 +-- tests/testthat/test-mmrm_rbmi.R | 6 + tests/testthat/test-patyrs-eair100.R | 124 ++++----- tests/testthat/test-pool_rbmi.R | 63 ++--- tests/testthat/test-proportions.R | 2 +- tests/testthat/test-pruning_functions.R | 138 +++++----- tests/testthat/test-rbmi.R | 97 +++++--- tests/testthat/test-remove_col_count.R | 8 +- tests/testthat/test-resp01_functions.R | 8 +- tests/testthat/test-response_by_var.R | 20 +- tests/testthat/test-sorting_functions.R | 26 +- tests/testthat/test-split_functions.R | 28 +-- tests/testthat/test-summarize_ancova.R | 10 +- tests/testthat/test-summarize_row_counts.R | 20 +- tests/testthat/test-tabulate_lsmeans.R | 10 +- tests/testthat/test-tabulate_rbmi.R | 12 +- ...-tern_utils_default_stats_formats_labels.R | 6 +- tests/testthat/test-test_proportion_diff.R | 4 +- tests/testthat/test-tt_to_tblfile.R | 73 +++--- tests/testthat/test-varia.R | 181 +++++++------- 77 files changed, 2479 insertions(+), 983 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/bug.yml create mode 100644 .github/ISSUE_TEMPLATE/cran-release.yml create mode 100644 .github/ISSUE_TEMPLATE/feature.yml create mode 100644 .github/ISSUE_TEMPLATE/release.yml create mode 100644 
.github/pull_request_template.md create mode 100644 .github/workflows/R-CMD-check.yaml create mode 100644 .github/workflows/rhub.yaml create mode 100644 R/a_two_tier.R create mode 100644 cran-comments.md create mode 100644 man/a_two_tier.Rd create mode 100644 man/assert_rbmi.Rd create mode 100644 tests/testthat/helper_rbmi.R create mode 100644 tests/testthat/test-a_two_tier.R diff --git a/.Rbuildignore b/.Rbuildignore index dc6f13a7..4fdc16bd 100644 --- a/.Rbuildignore +++ b/.Rbuildignore @@ -47,5 +47,7 @@ pkgdown ^output$ ^junco_tlg_template_scripts$ ^[^/]*\.R$ +^\.vscode$ +^[.]?air[.]toml$ ^CHANGELOG\.md$ ^air\.toml$ \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 00000000..9486373c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,31 @@ +--- +name: 🐞 Bug Report +description: File a bug report +title: "[Bug]: " +labels: ["bug"] +body: + - type: markdown + attributes: + value: | + Thanks for taking the time to fill out this bug report! + - type: textarea + id: what-happened + attributes: + label: What happened? + description: Also tell us, what did you expect to happen? + placeholder: Tell us what you see! + value: "A bug happened!" + validations: + required: true + - type: textarea + id: session-info + attributes: + label: sessionInfo() + description: Please copy and paste your output from `sessionInfo()`. This will be automatically formatted into code, so no need for backticks. + render: R + - type: textarea + id: logs + attributes: + label: Relevant log output + description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks. 
+ render: R diff --git a/.github/ISSUE_TEMPLATE/cran-release.yml b/.github/ISSUE_TEMPLATE/cran-release.yml new file mode 100644 index 00000000..b9144903 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/cran-release.yml @@ -0,0 +1,122 @@ +--- +name: 🎉 CRAN Release +description: Template for release to CRAN +title: "[CRAN Release]: <version>" +labels: ["release"] +assignees: + - KlaudiaBB + - cicdguy +body: + - type: markdown + attributes: + value: | + ⚠️ Please do not link or mention any internal references in this issue. This includes internal URLs, intellectual property and references. + - type: textarea + id: blocked-by + attributes: + label: Blocked by + description: Any PRs or issues that this release is blocked by. + placeholder: Add a list of blocking PRs or issues here. + value: | + #### PRs + - [ ] PR 1 + + #### Issues + - [ ] Issue 1 + validations: + required: true + - type: textarea + id: pre-release + attributes: + label: Pre-release + description: Pre-requisites that must be fulfilled before initiating the release process. + placeholder: Add your list of pre-requisites here. + value: | + - [ ] Make sure you adhere to CRAN submission policy: + * https://cran.r-project.org/web/packages/submission_checklist.html + * https://cran.r-project.org/web/packages/policies.html. + - [ ] Make sure that high priority bugs (label "priority" + "bug") have been resolved before going into the release. + - [ ] Review old/hanging PRs before going into the release (Optional). + - [ ] Revisit R-package's lifecycle badges (Optional). + - [ ] Make sure that all upstream dependencies of this package that need to be submitted to CRAN were accepted before going into release activities. + - [ ] Make sure integration tests are green 2-3 days before the release. Look carefully through logs (check for warnings and notes). + - [ ] Decide what gets merged in before starting release activities. 
+ - type: textarea + id: release + attributes: + label: Release + description: The steps to be taken in order to create a release. + placeholder: Steps to create a release. + value: | + #### Prepare the release + - [ ] Create a new release candidate branch + `git checkout -b release-candidate-vX.Y.Z` + - [ ] Update NEWS.md file: make sure it reflects a holistic summary of what has changed in the package. + - [ ] Remove the additional fields (`Remotes`) from the DESCRIPTION file where applicable. + - [ ] Make sure that the minimum dependency versions are updated in the DESCRIPTION file for the package and its reverse dependencies (Optional). + - [ ] Increase versioned dependency on {package name} to >=X.Y.Z (Optional). + - [ ] Commit your changes and create the PR on GitHub (add "[skip vbump]" in the PR title). Add all updates, commit, and push changes: + ```r + # Make the necessary modifications to your files + # Stage the changes + git add <files your modified> + # Commit the changes + git commit -m "[skip vbump] <Your commit message>" + git push origin release-candidate-vX.Y.Z` + ``` + + + #### Test the release + - [ ] Execute the manual tests on Shiny apps that are deployed on various hosting providers (Posit connect and shinyapps.io) - track the results in GitHub issue (Applicable only for frameworks that use Shiny). + - [ ] Monitor integration tests, if integration fails, create priority issues on the board. + - [ ] Execute UAT tests (Optional). + + #### CRAN submission + - [ ] Tag the update(s) as a release candidate vX.Y.Z-rc<iteration-number> (e.g. v0.5.3-rc1) on the release candidate branch (release-candidate-vX.Y.Z). + ```r + # Create rc tag for submission for internal validation + git tag vX.Y.Z-rc<iteration number> + git push origin vX.Y.Z-rc<iteration number> + ``` + - [ ] Build the package locally using the command:`R CMD build .` which will generate a .tar.gz file necessary for the CRAN submission. 
+ - [ ] Submit the package to https://win-builder.r-project.org/upload.aspx for testing, for more details please see "Building and checking R source packages for Windows": https://win-builder.r-project.org/. + - [ ] Once tested, send the package that was built in the previous steps to CRAN via this form: https://cran.r-project.org/submit.html. + - [ ] Address CRAN feedback, tag the package vX.Y.Z-rc(n+1) and repeat the submission to CRAN whenever necessary. + - [ ] Get the package accepted and published on CRAN. + + #### Tag the release + - [ ] If the additional fields were removed, add them back in a separate PR, and then merge the PR back to main (add "[skip vbump]" in the PR title). If nothing was removed just merge the PR you created in the "Prepare the release" section to 'main'. Note the commit hash of the merged commit. **Note:** additional commits might be added to the `main` branch by a bot or an automation - we do **NOT** want to tag this commit. + + ##### Make sure of the following before continuing + - [ ] CI checks are passing in GH before releasing the package. + - [ ] Shiny apps are deployable and there are no errors/warnings (Applicable only for frameworks that use Shiny). + + - [ ] Create a git tag with the final version set to vX.Y.Z on the main branch. In order to do this: + 1. Checkout the commit hash. + `git checkout <commit hash>` + 2. Tag the hash with the release version (vX.Y.Z). + `git tag vX.Y.Z` + 3. Push the tag to make the final release. + `git push origin vX.Y.Z` + - [ ] Update downstream package dependencies to (>=X.Y.Z) in {package name}. + **Note:** Once the release tag is created, the package is automatically published to internal repositories. + - type: textarea + id: post-release + attributes: + label: Post-release + description: The list of activities to be completed after the release. + placeholder: The steps that must be taken after the release. + value: | + - [ ] Ensure that CRAN checks are passing for the package. 
+ - [ ] Make sure that the package is published to internal repositories. + - [ ] Make sure internal documentation is up to date. + - [ ] Review and update installation instructions for the package wherever needed (Optional). + - [ ] Announce the release on ________. + - type: textarea + id: decision-tree + attributes: + label: Decision tree + description: Any decision tree(s) that would aid release management + placeholder: Any decision tree(s) that would aid release management. + value: | + Click [here](https://github.com/insightsengineering/.github/blob/main/.github/ISSUE_TEMPLATE/RELEASE_DECISION_TREE.md) to see the release decision tree. diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml new file mode 100644 index 00000000..b1205abc --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -0,0 +1,13 @@ +--- +name: ✨ Feature Request +description: Request or propose a new feature +title: "[Feature Request]: <title>" +labels: ["enhancement"] +body: + - type: textarea + attributes: + label: Feature description + description: | + Do not use this form to ask a question, or ask for assistance. + Is your feature request related to a problem? Please describe with a clear and concise description of the problem. \n\n + Could you describe the solution you would like? Please provide a clear and concise description of what you want to happen. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/release.yml b/.github/ISSUE_TEMPLATE/release.yml new file mode 100644 index 00000000..af85dc2e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release.yml @@ -0,0 +1,121 @@ +--- +name: 🚀 Release +description: Template for package release +title: "[Release]: <version>" +labels: ["release"] +assignees: + - munoztd0 + - gmbecker +body: + - type: markdown + attributes: + value: | + ⚠️ Please do not link or mention any internal references in this issue. This includes internal URLs, intellectual property and references. 
+ - type: textarea + id: blocked-by + attributes: + label: Blocked by + description: Any PRs or issues that this release is blocked by. + placeholder: Add a list of blocking PRs or issues here. + value: | + #### PRs + - [ ] PR 1 + + #### Issues + - [ ] Issue 1 + validations: + required: true + - type: textarea + id: pre-release + attributes: + label: Pre-release + description: Pre-requisites that must be fulfilled before initiating the release process. + placeholder: Add your list of pre-requisites here. + value: | + - [ ] Make sure that high priority bugs (label "priority" + "bug") have been resolved before going into the release. + - [ ] Review old/hanging PRs before going into the release. + - [ ] Revisit R-package's lifecycle badges (Optional). + - [ ] Release Manager: Discuss package dependencies, create a plan to sequentially close release activities and submit groups of packages for internal validation (Applicable only for regulatory release). + - [ ] Check Validation Pipeline dry-run results for the package. + - [ ] Make sure all relevant integration tests are green 2-3 days before the release. Look carefully through logs (check for warnings and notes). + - [ ] Inform about the soft code freeze, decide what gets merged in before starting release activities. + - type: textarea + id: release + attributes: + label: Release + description: The steps to be taken in order to create a release. + placeholder: Steps to create a release. + value: | + #### Prepare the release + - [ ] Create a new release candidate branch + `git checkout -b release-candidate-vX.Y.Z` + - [ ] Update NEWS.md file: make sure it reflects a holistic summary of what has changed in the package, check README. + - [ ] Remove the additional fields (`Remotes`) from the DESCRIPTION file where applicable. + - [ ] Make sure that the minimum dependency versions are updated in the DESCRIPTION file for the package. + - [ ] Increase versioned dependency on {package name} to >=X.Y.Z. 
+ - [ ] Commit your changes and create the PR on GitHub (add "[skip vbump]" in the PR title). Add all updates, commit, and push changes: + ```r + # Make the necessary modifications to your files + # Stage the changes + git add <files your modified> + # Commit the changes + git commit -m "[skip vbump] <Your commit message>" + git push origin release-candidate-vX.Y.Z + ``` + + + #### Test the release + - [ ] Execute the manual tests on Shiny apps that are deployed on various hosting providers (Posit connect and shinyapps.io) - track the results in GitHub issue (Applicable only for frameworks that use Shiny). + - [ ] Monitor integration tests, if integration fails, create priority issues on the board. + - [ ] Execute UAT tests (Optional). + + + #### Validation loop + + **Note:** This section is applicable only for regulatory packages. + + - [ ] Tag the update(s) as a release candidate vX.Y.Z-rc<iteration-number> (e.g. v0.5.3-rc1) on the release candidate branch (release-candidate-vX.Y.Z). + ```r + # Create rc tag for submission for internal validation + git tag vX.Y.Z-rc<iteration number> + git push origin vX.Y.Z-rc<iteration number> + ``` + - [ ] Submit the package for internal validation. + - [ ] Address any feedback (internal validation/user testing), retag the package as a release candidate vX.Y.Z-rc(n+1). Repeat the submission for internal validation if necessary. + - [ ] Get the package validated. + + #### Tag the release + - [ ] If the additional fields were removed, add them back in a separate PR, and then merge the PR back to main (add "[skip vbump]" in the PR title). If nothing was removed just merge the PR you created in the "Prepare the release" section to `main`. Note the commit hash of the merged commit. **Note:** additional commits might be added to the `main` branch by a bot or an automation - we do **NOT** want to tag this commit. + + ##### Make sure of the following before continuing with the release: + - [ ] CI checks are passing in GH. 
+ + - [ ] Create a git tag with the final version set to vX.Y.Z on the main branch. In order to do this: + 1. Checkout the commit hash. + `git checkout <commit hash>` + 2. Tag the hash with the release version (vX.Y.Z). + `git tag vX.Y.Z` + 3. Push the tag to make the final release. + `git push origin vX.Y.Z` + - [ ] Update downstream package dependencies to (>=X.Y.Z) in {package name}. + **Note:** Once the release tag is created, the package is automatically published to internal repositories. + - type: textarea + id: post-release + attributes: + label: Post-release + description: The list of activities to be completed after the release. + placeholder: The steps that must be taken after the release. + value: | + - [ ] Make sure that the package is published to internal repositories (Validated and/or Non-Validated repository). + - [ ] Review and update installation instructions for the package if needed. + - [ ] Make sure internal documentation/documentation catalogs are up to date. + - [ ] Notify the team to start post-release/clean-up activities. + - [ ] Announce the release on ________. + - type: textarea + id: decision-tree + attributes: + label: Decision tree + description: Any decision tree(s) that would aid release management + placeholder: Any decision tree(s) that would aid release management. + value: | + Click [here](https://github.com/insightsengineering/.github/blob/main/.github/ISSUE_TEMPLATE/RELEASE_DECISION_TREE.md) to see the release decision tree. diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..58a94427 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,10 @@ +# Pull Request + +<!--- Replace `#nnn` with your issue link for reference. --> + +Fixes #nnn + + +## Checks + +- [ ] (Have you updated the changelog.md ?) 
\ No newline at end of file diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml new file mode 100644 index 00000000..a4ce8ae7 --- /dev/null +++ b/.github/workflows/R-CMD-check.yaml @@ -0,0 +1,59 @@ +# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples +# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help +on: + pull_request: + types: + - opened + - synchronize + - reopened + - ready_for_review + push: + branches: + - main + +name: R-CMD-check.yaml + +permissions: read-all + +jobs: + R-CMD-check: + runs-on: ${{ matrix.config.os }} + + name: ${{ matrix.config.os }} (${{ matrix.config.r }}) + + strategy: + fail-fast: false + matrix: + config: + - {os: macos-latest, r: 'release'} + - {os: windows-latest, r: 'release'} + - {os: ubuntu-latest, r: 'release'} + + env: + GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} + R_KEEP_PKG_SOURCE: yes + + steps: + - uses: actions/checkout@v4 + + - uses: r-lib/actions/setup-pandoc@v2 + + - uses: r-lib/actions/setup-r@v2 + with: + r-version: ${{ matrix.config.r }} + http-user-agent: ${{ matrix.config.http-user-agent }} + use-public-rspm: true + + - name: Install rbmi + run: install.packages('rbmi', repos = c('https://insightsengineering.r-universe.dev')) + shell: Rscript {0} + + - uses: r-lib/actions/setup-r-dependencies@v2 + with: + extra-packages: any::rcmdcheck, rbmi=?ignore + needs: check + + - uses: r-lib/actions/check-r-package@v2 + with: + upload-snapshots: true + build_args: 'c("--no-manual","--compact-vignettes=gs+qpdf")' diff --git a/.github/workflows/inspect.yaml b/.github/workflows/inspect.yaml index 7cf64088..4f095a21 100644 --- a/.github/workflows/inspect.yaml +++ b/.github/workflows/inspect.yaml @@ -1,5 +1,5 @@ --- -name: Check 🛠 +name: Inspection 🛠 on: pull_request: @@ -17,73 +17,73 @@ jobs: audit: name: Audit Dependencies 🕵️‍♂️ uses: insightsengineering/r.pkg.template/.github/workflows/audit.yaml@main - r-cmd: - name: R CMD 
Check 🧬 - uses: insightsengineering/r.pkg.template/.github/workflows/build-check-install.yaml@main - secrets: - REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} - with: - deps-installation-method: setup-r-dependencies - additional-env-vars: | - _R_CHECK_CRAN_INCOMING_REMOTE_=false - additional-r-cmd-check-params: --as-cran - enforce-note-blocklist: true - note-blocklist: | - checking dependencies in R code .* NOTE - checking R code for possible problems .* NOTE - checking examples .* NOTE - checking Rd line widths .* NOTE - checking S3 generic/method consistency .* NOTE - checking Rd .usage sections .* NOTE - checking for unstated dependencies in vignettes .* NOTE - checking top-level files .* NOTE - unit-test-report-brand: >- - https://raw.githubusercontent.com/johnsonandjohnson/junco/main/man/figures/logo.png - r-cmd-non-cran: - name: R CMD Check (non-CRAN) 🧬 - uses: insightsengineering/r.pkg.template/.github/workflows/build-check-install.yaml@main - secrets: - REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} - with: - deps-installation-method: setup-r-dependencies - additional-env-vars: | - _R_CHECK_CRAN_INCOMING_REMOTE_=false - NOT_CRAN=true - enforce-note-blocklist: true - concurrency-group: non-cran - unit-test-report-directory: unit-test-report-non-cran - note-blocklist: | - checking dependencies in R code .* NOTE - checking R code for possible problems .* NOTE - checking examples .* NOTE - checking Rd line widths .* NOTE - checking S3 generic/method consistency .* NOTE - checking Rd .usage sections .* NOTE - checking for unstated dependencies in vignettes .* NOTE - checking top-level files .* NOTE - unit-test-report-brand: >- - https://raw.githubusercontent.com/johnsonandjohnson/junco/main/man/figures/logo.png - coverage: - name: Coverage 📔 - uses: insightsengineering/r.pkg.template/.github/workflows/test-coverage.yaml@main - secrets: - REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} - with: - deps-installation-method: setup-r-dependencies - 
additional-env-vars: | - NOT_CRAN=true +# r-cmd: +# name: R CMD Check 🧬 +# uses: insightsengineering/r.pkg.template/.github/workflows/build-check-install.yaml@main +# secrets: +# REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} +# with: +# deps-installation-method: setup-r-dependencies +# additional-env-vars: | +# _R_CHECK_CRAN_INCOMING_REMOTE_=false +# additional-r-cmd-check-params: --as-cran +# enforce-note-blocklist: true +# note-blocklist: | +# checking dependencies in R code .* NOTE +# checking R code for possible problems .* NOTE +# checking examples .* NOTE +# checking Rd line widths .* NOTE +# checking S3 generic/method consistency .* NOTE +# checking Rd .usage sections .* NOTE +# checking for unstated dependencies in vignettes .* NOTE +# checking top-level files .* NOTE +# unit-test-report-brand: >- +# https://raw.githubusercontent.com/johnsonandjohnson/junco/main/man/figures/logo.png +# r-cmd-non-cran: +# name: R CMD Check (non-CRAN) 🧬 +# uses: insightsengineering/r.pkg.template/.github/workflows/build-check-install.yaml@main +# secrets: +# REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} +# with: +# deps-installation-method: setup-r-dependencies +# additional-env-vars: | +# _R_CHECK_CRAN_INCOMING_REMOTE_=false +# NOT_CRAN=true +# enforce-note-blocklist: true +# concurrency-group: non-cran +# unit-test-report-directory: unit-test-report-non-cran +# note-blocklist: | +# checking dependencies in R code .* NOTE +# checking R code for possible problems .* NOTE +# checking examples .* NOTE +# checking Rd line widths .* NOTE +# checking S3 generic/method consistency .* NOTE +# checking Rd .usage sections .* NOTE +# checking for unstated dependencies in vignettes .* NOTE +# checking top-level files .* NOTE +# unit-test-report-brand: >- +# https://raw.githubusercontent.com/johnsonandjohnson/junco/main/man/figures/logo.png +# coverage: +# name: Coverage 📔 +# uses: insightsengineering/r.pkg.template/.github/workflows/test-coverage.yaml@main +# secrets: +# 
REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} +# with: +# deps-installation-method: setup-r-dependencies +# additional-env-vars: | +# NOT_CRAN=true linter: if: github.event_name != 'push' name: Linter 🦸‍♀️ uses: johnsonandjohnson/pharmaversesdtmjnj/.github/workflows/lint.yaml@main - roxygen: - name: Roxygen 🅾 - uses: insightsengineering/r.pkg.template/.github/workflows/roxygen.yaml@main - secrets: - REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} - with: - deps-installation-method: setup-r-dependencies - auto-update: true +# roxygen: +# name: Roxygen 🅾 +# uses: insightsengineering/r.pkg.template/.github/workflows/roxygen.yaml@main +# secrets: +# REPO_GITHUB_TOKEN: ${{ secrets.REPO_GITHUB_TOKEN }} +# with: +# deps-installation-method: setup-r-dependencies +# auto-update: true gitleaks: name: gitleaks 💧 uses: insightsengineering/r.pkg.template/.github/workflows/gitleaks.yaml@main diff --git a/.github/workflows/pkgdown.yaml b/.github/workflows/pkgdown.yaml index e6b50b4f..0d686c32 100644 --- a/.github/workflows/pkgdown.yaml +++ b/.github/workflows/pkgdown.yaml @@ -4,6 +4,11 @@ on: push: branches: [main] pull_request: + types: + - opened + - synchronize + - reopened + - ready_for_review release: types: [published] workflow_dispatch: @@ -30,10 +35,16 @@ jobs: - uses: r-lib/actions/setup-r@v2 with: use-public-rspm: true + extra-repositories: | + https://insightsengineering.r-universe.dev/ + + - name: Install rbmi + run: install.packages('rbmi', repos = c('https://insightsengineering.r-universe.dev')) + shell: Rscript {0} - uses: r-lib/actions/setup-r-dependencies@v2 with: - extra-packages: any::pkgdown, local::. + extra-packages: any::pkgdown, local::., rbmi=?ignore needs: website - name: Build site diff --git a/.github/workflows/rhub.yaml b/.github/workflows/rhub.yaml new file mode 100644 index 00000000..74ec7b05 --- /dev/null +++ b/.github/workflows/rhub.yaml @@ -0,0 +1,95 @@ +# R-hub's generic GitHub Actions workflow file. 
It's canonical location is at +# https://github.com/r-hub/actions/blob/v1/workflows/rhub.yaml +# You can update this file to a newer version using the rhub2 package: +# +# rhub::rhub_setup() +# +# It is unlikely that you need to modify this file manually. + +name: R-hub +run-name: "${{ github.event.inputs.id }}: ${{ github.event.inputs.name || format('Manually run by {0}', github.triggering_actor) }}" + +on: + workflow_dispatch: + inputs: + config: + description: 'A comma separated list of R-hub platforms to use.' + type: string + default: 'linux,windows,macos' + name: + description: 'Run name. You can leave this empty now.' + type: string + id: + description: 'Unique ID. You can leave this empty now.' + type: string + +jobs: + + setup: + runs-on: ubuntu-latest + outputs: + containers: ${{ steps.rhub-setup.outputs.containers }} + platforms: ${{ steps.rhub-setup.outputs.platforms }} + + steps: + # NO NEED TO CHECKOUT HERE + - uses: r-hub/actions/setup@v1 + with: + config: ${{ github.event.inputs.config }} + id: rhub-setup + + linux-containers: + needs: setup + if: ${{ needs.setup.outputs.containers != '[]' }} + runs-on: ubuntu-latest + name: ${{ matrix.config.label }} + strategy: + fail-fast: false + matrix: + config: ${{ fromJson(needs.setup.outputs.containers) }} + container: + image: ${{ matrix.config.container }} + + steps: + - uses: r-hub/actions/checkout@v1 + - uses: r-hub/actions/platform-info@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + - uses: r-hub/actions/setup-deps@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + - uses: r-hub/actions/run-check@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + + other-platforms: + needs: setup + if: ${{ needs.setup.outputs.platforms != '[]' }} + runs-on: ${{ matrix.config.os }} + name: ${{ matrix.config.label }} + strategy: + fail-fast: false + matrix: + config: ${{ 
fromJson(needs.setup.outputs.platforms) }} + + steps: + - uses: r-hub/actions/checkout@v1 + - uses: r-hub/actions/setup-r@v1 + with: + job-config: ${{ matrix.config.job-config }} + token: ${{ secrets.RHUB_TOKEN }} + - uses: r-hub/actions/platform-info@v1 + with: + token: ${{ secrets.RHUB_TOKEN }} + job-config: ${{ matrix.config.job-config }} + - uses: r-hub/actions/setup-deps@v1 + with: + job-config: ${{ matrix.config.job-config }} + token: ${{ secrets.RHUB_TOKEN }} + - uses: r-hub/actions/run-check@v1 + with: + job-config: ${{ matrix.config.job-config }} + token: ${{ secrets.RHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index d5cdecd8..8ed5d2b4 100644 --- a/.gitignore +++ b/.gitignore @@ -32,4 +32,7 @@ programs_external/* #IDE .vscode/settings.json .idea/* -temp_ilse/* \ No newline at end of file + +#Air +air.toml +.vscode/extensions.json diff --git a/CHANGELOG.md b/CHANGELOG.md index 9cea935d..2b747eb6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [0.1.3] - 2025-11-28 +## [0.1.2] - 2025-12-10 ### Added and Removed +- Add `a_two_tier()` analysis function - Add `a_maxlev()` to be able to calculate count and percentage of the maximum level of an ordered factor per subject - Remove `brackets_to_rtf()` - Export `rbmi_pool()` #22 @@ -16,18 +17,21 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Added standard error (SE) column for each treatment arm's least square (LS) means estimate to the `summarize_lsmeans_wide()` layout. - Added the internal function `do_exclude_split()` to facilitate the exclusion of specified row splits from analysis functions. 
- Remove some unused functions (jj_uc_map, postfun_cog, postfun_eq5d, column_N, non_blank_sentinel, null_fn, unicodify +- Add extra check for existence of `.alt_df_full` when layout has risk difference column and a row-split (h_create_alt_df) #120. + ### Changed - Replace {pharmaverseadam} with {pharmaverseadamjnj} - Update pruning_functions.R -- update `string_to_title()` to handle factors (#26) +- Update `string_to_title()` to handle factors (#26) +- Moved rbmi to suggest +- Replaced `denom_df` with `.alt_df_full` in `a_maxlev()`. ### Fixed - Fixes #102 bug inappropriate warnings from `cond_rm_facets` function - Fix bug for not selecting NA records in `h_subset_combo()` +- Consistent `tt_to_tbldf()` function behavior with invalid structures #116 -## [0.1.2] - 2025-11-20 -- Removed rbmi ## [0.1.1] - 2025-07-28 diff --git a/DESCRIPTION b/DESCRIPTION index 89a814b6..99fd1643 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,7 +1,7 @@ Package: junco Title: Create Common Tables and Listings Used in Clinical Trials -Version: 0.1.3 -Date: 2025-06-20 +Version: 0.1.2 +Date: 2025-12-04 Authors@R: c( person("Gabriel", "Becker", , "gabembecker@gmail.com", role = c("cre", "aut"), comment = "Original creator of the package, and author of included formatters functions"), @@ -14,6 +14,7 @@ Authors@R: c( person("Ezequiel", "Anokian", , "eanokia1@its.jnj.com", role = c("ctb")), person("Renfei", "Mao", , "rmao6@its.jnj.com", role = c("ctb")), person("Mrinal", "Das", , "mdas35@its.jnj.com", role = c("ctb")), + person("Wojciech", "Wojciak", , "wwojciak@its.jnj.com", role = c("ctb")), person("Isaac", "Gravestock", role = "cph", comment = "Author of included rbmi functions"), person("Joe", "Zhu", role = "cph", @@ -66,13 +67,18 @@ Suggests: forcats (>= 1.0.0), testthat (>= 3.0.0), mockery, + mvtnorm, parallel, readxl, rlang, + rbmi (>= 1.3.0), + tidyr, + rlang, pharmaverseadamjnj VignetteBuilder: knitr Config/testthat/edition: 3 Remotes: - 
insightsengineering/formatters@1040_rtables_round_type, - insightsengineering/rtables@1040_rtables_round_type, - insightsengineering/rlistings@1040_rtables_round_type + insightsengineering/formatters@main, + insightsengineering/rtables@main, + insightsengineering/rlistings@main +Additional_repositories: https://insightsengineering.r-universe.dev/ diff --git a/NAMESPACE b/NAMESPACE index 02778548..d9eb34b4 100644 --- a/NAMESPACE +++ b/NAMESPACE @@ -27,6 +27,7 @@ export(a_summarize_aval_chg_diff_j) export(a_summarize_ex_j) export(a_summarize_mmrm) export(a_test_proportion_diff) +export(a_two_tier) export(analyze_values) export(bspt_pruner) export(build_formula) @@ -122,6 +123,7 @@ export(summarize_lsmeans_wide) export(summarize_row_counts) export(tt_to_tlgrtf) export(var_relabel_list) +import(checkmate) import(formatters) import(methods) import(rlistings) diff --git a/NEWS.md b/NEWS.md index 530347d3..fda8f021 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,11 +1,19 @@ -# junco 0.1.3 +# junco 0.1.2 ## New features -* Added validation of table structure in `tt_to_tlgrtf()` using `rtables::validate_table_struct()` with a warning if the structure is invalid +- Added validation of table structure in `tt_to_tlgrtf()` using `rtables::validate_table_struct()` with a warning if the structure is invalid +- Added standard error (SE) column for each treatment arm's least square (LS) means estimate to the `summarize_lsmeans_wide()` layout. 
+- Add `a_maxlev()` to be able to calculate count and percentage of the maximum level of an ordered factor per subject ## Minor improvements and bug fixes -* Bug fixes and minor improvements -* Enhanced documentation -* Updated dependencies \ No newline at end of file +- Remove some unused functions (jj_uc_map, postfun_cog, postfun_eq5d, column_N, non_blank_sentinel, null_fn, unicodify +- Replace {pharmaverseadam} with {pharmaverseadamjnj} +- Fix bug for not selecting NA records in `h_subset_combo()` +- Update `string_to_title()` to handle factors (#26) +- Consistent `tt_to_tbldf()` function behavior with invalid structures (#116) + +## Other changes + +* Moved 'rbmi' from 'Imports' to 'Suggests' and made package fully functional without it diff --git a/R/a_maxlev.R b/R/a_maxlev.R index 4e191b1d..af36a12f 100644 --- a/R/a_maxlev.R +++ b/R/a_maxlev.R @@ -26,10 +26,12 @@ #' `df[[.var]]` is included in the final count and percentage statistics. #' #' @inheritParams proposal_argument_convention -#' @param denom_df (`data.frame`)\cr -#' A dataset used to compute the denominator for proportions. Required when -#' the same subject appears multiple times in the dataset due to treatment -#' sequences. +#' @param .alt_df_full (`dataframe`)\cr A dataset used to compute the +#' denominator for proportions. This is required when the same subject appears +#' multiple times in the dataset due to treatment sequences. +#' `colnames(.alt_df_full)` must be a superset of `id`. +#' This argument gets populated by the rtables split machinery +#' (see [rtables::additional_fun_params]). #' @param any_level (`flag`)\cr #' Should be set to `TRUE` when the function is used as a `cfun`. 
#' @param any_level_exclude (`character`)\cr @@ -66,27 +68,27 @@ #' summarize_row_groups( #' "AESEV", #' cfun = a_maxlev, -#' extra_args = list(id = "ID", denom_df = my_adsl, any_level = TRUE) +#' extra_args = list(id = "ID", any_level = TRUE) #' ) |> #' analyze( #' "AESEV", #' afun = a_maxlev, -#' extra_args = list(id = "ID", denom_df = my_adsl) +#' extra_args = list(id = "ID") #' ) -#' build_table(lyt, my_adae) +#' build_table(lyt, my_adae, alt_counts_df = my_adsl) a_maxlev <- function(df, labelstr = NULL, .var, .spl_context, id = "USUBJID", - denom_df, + .alt_df_full = NULL, any_level = FALSE, any_level_exclude = "Missing", ...) { checkmate::assert_factor(df[[.var]], ordered = TRUE, any.missing = FALSE) checkmate::assert_string(id) checkmate::assert_subset(id, choices = colnames(df)) - checkmate::assert_subset(id, choices = colnames(denom_df)) + checkmate::assert_subset(id, choices = colnames(.alt_df_full)) checkmate::assert_flag(any_level) checkmate::assert_character(any_level_exclude, any.missing = FALSE, unique = TRUE) @@ -109,10 +111,7 @@ a_maxlev <- function(df, table(max_per_id) } - # TODO: N is currently computed using the `denom_df` extra argument. - # This is a temporary placeholder until the new version of rtables is released, - # after which `.alt_count_df` will be available for use in `afun`/`cfun`. - cur_col <- subset(denom_df, eval(.spl_context$cur_col_expr[[1]])) + cur_col <- subset(.alt_df_full, eval(.spl_context$cur_col_expr[[1]])) N <- length(unique(cur_col[[id]])) as.list(data.frame(rbind(count_max, count_max / N))) } else { diff --git a/R/a_two_tier.R b/R/a_two_tier.R new file mode 100644 index 00000000..90fc60bb --- /dev/null +++ b/R/a_two_tier.R @@ -0,0 +1,235 @@ +#' @name a_two_tier +#' +#' @title Two Tier Analysis Function +#' +#' @author GB, WW. +#' +#' @description The analysis function used as an `afun` in \link[rtables]{analyze}. 
+#' This function simulates a final additional level of nesting with a
+#' traditional analyze call inside it.
+#'
+#' This makes it possible to create what *appear to be* group summary or
+#' content rows and to *optionally or conditionally* generate one or more
+#' "detail" rows underneath it.
+#'
+#' For example, in a disposition table, one might want counts for completed
+#' and ongoing patients with no further detail underneath, but a breakdown of
+#' specific reasons beneath the count of patients who discontinued treatment.
+#'
+#' @details Both the analysis variable and `inner_var` must be factors.
+#' Detail rows are differentiated by having an indent mod of one, causing them
+#' to hang indented under their corresponding group row.
+#'
+#' @note In its current form, this function may not retain any formatting or
+#' labeling instructions added by `grp_fun` or `detail_fun`, and it will
+#' override any `.indent_mods` values specified by them. This behavior may
+#' change in future versions.
+#'
+#' @inheritParams proposal_argument_convention
+#' @param inner_var (`string`)\cr single variable name to use when generating
+#' the detail rows.
+#' @param drill_down_levs (`character`)\cr the level(s) of `.var` under which
+#' detail rows should be generated.
+#' @param use_all_levels (`flag`)\cr controls which factor levels will be
+#' present for `inner_var` (both in `df`/`x` and in `.df_row`) when calling
+#' `detail_fun`.
+#' If `TRUE`, all levels (those present on the factor `.df_row[[inner_var]]`,
+#' *regardless of whether the level is observed in the row group or not*) will be
+#' present when creating detail rows.
+#' Otherwise (the default), only the levels observed
+#' *anywhere in the row group, i.e., within `.df_row`* will be present.
+#' @param grp_fun (`function`)\cr analysis function to be used when generating
+#' the "group summary" outer rows.
+#' @param detail_fun (`function`)\cr analysis function to be used when generating
+#' "detail" inner rows.
+#' @param .alt_df_full (`dataframe`)\cr denominator dataset for fraction and +#' relative risk calculations.\cr +#' this argument gets populated by the rtables split machinery +#' (see [rtables::additional_fun_params]). +#' @param ... additional arguments passed directly to `grp_fun` and `detail_fun`. +#' +#' @return A `RowsVerticalSection` object including both the group row and all +#' detail rows, if applicable, for the facet. +#' +#' @import checkmate +#' @export +#' +#' @examples +#' +#' # Example 1 +#' +#' lyt_obs_levels <- basic_table() |> +#' split_cols_by("ARM") |> +#' split_rows_by("EOSSTT", child_labels = "hidden") |> +#' analyze("EOSSTT", +#' afun = a_two_tier, +#' extra_args = list( +#' grp_fun = simple_analysis, +#' detail_fun = simple_analysis, +#' inner_var = "DCSREAS", +#' drill_down_levs = "DISCONTINUED" +#' ) +#' ) +#' +#' tbl <- build_table(lyt_obs_levels, ex_adsl) +#' tbl +#' +#' lyt_all_levels <- basic_table() |> +#' split_cols_by("ARM") |> +#' split_rows_by("EOSSTT", child_labels = "hidden") |> +#' analyze("EOSSTT", +#' afun = a_two_tier, +#' extra_args = list( +#' grp_fun = simple_analysis, +#' detail_fun = simple_analysis, +#' inner_var = "DCSREAS", +#' drill_down_levs = "DISCONTINUED", +#' use_all_levels = TRUE +#' ) +#' ) +#' +#' adsl_subset <- subset(ex_adsl, DCSREAS != "ADVERSE EVENT") +#' levels(adsl_subset$DCSREAS) +#' +#' tbl_all_levels <- build_table(lyt_all_levels, adsl_subset) +#' tbl_all_levels +#' +#' tbl_obs_levels <- build_table(lyt_obs_levels, adsl_subset) +#' tbl_obs_levels +#' +#' # Example 2 +#' +#' library(dplyr) +#' +#' trtvar <- "ARM" +#' ctrl_grp <- "B: Placebo" +#' +#' adsl <- ex_adsl |> select(c("USUBJID", "STRATA1", "EOSSTT", "DCSREAS", all_of(trtvar))) +#' adsl$colspan_trt <- factor( +#' ifelse(adsl[[trtvar]] == ctrl_grp, " ", "Active Study Agent"), +#' levels = c("Active Study Agent", " ") +#' ) +#' adsl$rrisk_header <- "Risk Difference (%) (95% CI)" +#' adsl$rrisk_label <- paste(adsl[[trtvar]], paste("vs", 
ctrl_grp)) +#' +#' colspan_trt_map <- create_colspan_map( +#' df = adsl, +#' non_active_grp = ctrl_grp, +#' non_active_grp_span_lbl = " ", +#' active_grp_span_lbl = "Active Study Agent", +#' colspan_var = "colspan_trt", +#' trt_var = trtvar +#' ) +#' +#' a_freq_j_args <- list( +#' .stats = "count_unique_fraction", +#' denom = "n_altdf", +#' ref_path = c("colspan_trt", " ", trtvar, ctrl_grp) +#' ) +#' +#' two_tier_args <- list( +#' grp_fun = a_freq_j, +#' detail_fun = a_freq_j, +#' inner_var = "DCSREAS", +#' drill_down_levs = "DISCONTINUED" +#' ) +#' +#' lyt_rrisk <- basic_table() |> +#' split_cols_by("colspan_trt", split_fun = trim_levels_to_map(map = colspan_trt_map)) |> +#' split_cols_by(trtvar) |> +#' split_cols_by("rrisk_header", nested = FALSE) |> +#' split_cols_by(trtvar, labels_var = "rrisk_label", split_fun = remove_split_levels(ctrl_grp)) |> +#' split_rows_by("STRATA1") |> +#' split_rows_by("EOSSTT", child_labels = "hidden") |> +#' analyze("EOSSTT", afun = a_two_tier, extra_args = c(two_tier_args, a_freq_j_args)) +#' +#' adsl_subset <- subset( +#' adsl, +#' EOSSTT != "COMPLETED" & (is.na(DCSREAS) | DCSREAS != "PROTOCOL VIOLATION") +#' ) +#' +#' tbl_rrisk <- build_table(lyt_rrisk, adsl_subset, alt_counts_df = adsl_subset) +#' tbl_rrisk +#' +a_two_tier <- function(df, + labelstr = NULL, + .var, + .N_col, + .df_row, + inner_var, + drill_down_levs, + .spl_context, + use_all_levels = FALSE, + grp_fun, + detail_fun, + .alt_df_full = NULL, + ...) { + assert_string(inner_var) + assert_character(drill_down_levs) + assert_flag(use_all_levels) + assert_function(grp_fun) + assert_function(detail_fun) + + cur_grp <- tail(.spl_context$value, 1) + df[[.var]] <- factor(df[[.var]], levels = cur_grp) + .df_row[[.var]] <- factor(.df_row[[.var]], levels = cur_grp) + args <- list( + labelstr = labelstr, + .var = .var, + .N_col = .N_col, + .df_row = .df_row, + .spl_context = .spl_context, + .alt_df_full = .alt_df_full, + ... 
+ ) + if (names(formals(grp_fun))[1] == "df") { + args <- c(list(df = df), args) + } else { + args <- c(list(x = df[[.var]]), args) + } + cell_vals <- unclass( + do.call(grp_fun, args) + ) + names(cell_vals) <- attr(cell_vals, "row_label") + + ## calculate the drill-down values if necessary + if (cur_grp %in% drill_down_levs && any(!is.na(df[[inner_var]]))) { + ## have to make sure we use all levels for the whole row group + ## so that each column gets the same number of values in the same order + all_inner_levs <- levels(df[[inner_var]]) + if (use_all_levels) { + detail_levs <- all_inner_levs + } else { + detail_levs <- all_inner_levs[all_inner_levs %in% all_inner_levs[.df_row[[inner_var]]]] + } + + inner_vec <- factor(df[[inner_var]], levels = detail_levs) + df[[inner_var]] <- factor(df[[inner_var]], levels = detail_levs) + .df_row[[inner_var]] <- factor(.df_row[[inner_var]], levels = detail_levs) + det_args <- list( + .var = inner_var, + .N_col = .N_col, + .df_row = .df_row, + .spl_context = .spl_context, + .alt_df_full = .alt_df_full, + ... + ) + if (names(formals(detail_fun))[1] == "df") { + det_args <- c(list(df = df), det_args) + } else { + det_args <- c(list(x = df[[inner_var]]), det_args) + } + + detail_vals <- unclass( + do.call(detail_fun, det_args) + ) + + names(detail_vals) <- attr(detail_vals, "row_label") + } else { + detail_vals <- list() + } + in_rows( + .list = c(cell_vals, detail_vals), + .indent_mods = c(0, rep(1, length.out = length(detail_vals))) + ) +} diff --git a/R/ancova_rbmi.R b/R/ancova_rbmi.R index e4c45485..335b6788 100644 --- a/R/ancova_rbmi.R +++ b/R/ancova_rbmi.R @@ -4,14 +4,14 @@ #' "treatment effect" (i.e. the contrast between the two treatment groups) and #' the least square means estimates in each group. #' -#' @param data (`data.frame`)\cr A `data.frame` containing the data to be used in the model. -#' @param vars (`list`)\cr A `vars` object as generated by [rbmi::set_vars()]. 
Only the `group`, +#' @param data A `data.frame` containing the data to be used in the model. +#' @param vars A `vars` object as generated by the set_vars() function from the rbmi package. Only the `group`, #' `visit`, `outcome` and `covariates` elements are required. See details. -#' @param visits (`character vector`)\cr An optional character vector specifying which visits to +#' @param visits An optional character vector specifying which visits to #' fit the ancova model at. If `NULL`, a separate ancova model will be fit to the #' outcomes for each visit (as determined by `unique(data[[vars$visit]])`). #' See details. -#' @param weights (`character`)\cr Character, either `"counterfactual"` (default), `"equal"`, +#' @param weights Character, either `"counterfactual"` (default), `"equal"`, #' `"proportional_em"` or `"proportional"`. #' Specifies the weighting strategy to be used when calculating the lsmeans. #' See the weighting section for more details. @@ -25,10 +25,10 @@ #' 4. Extract the "treatment effect" & least square means for each treatment group. #' 5. Repeat points 2-3 for all other values in `visits`. #' -#' If no value for `visits` is provided, then it will be set to +#' If no value for `visits` is provided then it will be set to #' `unique(data[[vars$visit]])`. #' -#' In order to meet the formatting standards set by [rbmi_analyse()], the results will be collapsed +#' In order to meet the formatting standards set by [rbmi_analyse()] the results will be collapsed #' into a single list suffixed by the visit name, e.g.: #' ``` #' list( @@ -48,7 +48,7 @@ #' The new "var" refers to the model estimated variance of the residuals. #' #' If you want to include interaction terms in your model this can be done -#' by providing them to the `covariates` argument of [rbmi::set_vars()] +#' by providing them to the `covariates` argument of the set_vars() function from the rbmi package #' e.g. `set_vars(covariates = c("sex*age"))`. 
#' #' @return a list of variance (`var_*`), treatment effect (`trt_*`), and @@ -64,13 +64,15 @@ #' #' @seealso [rbmi_analyse()] #' @seealso [stats::lm()] -#' @seealso [rbmi::set_vars()] +#' @seealso The set_vars() function from the rbmi package #' @export rbmi_ancova <- function( - data, - vars, - visits = NULL, - weights = c("counterfactual", "equal", "proportional_em", "proportional")) { + data, + vars, + visits = NULL, + weights = c("counterfactual", "equal", "proportional_em", "proportional") +) { + assert_rbmi() outcome <- vars[["outcome"]] group <- vars[["group"]] covariates <- vars[["covariates"]] @@ -105,10 +107,9 @@ rbmi_ancova <- function( #' @description #' Performance analysis of covariance. See [rbmi_ancova()] for full details. #' -#' @param outcome (`string`)\cr name of the outcome variable in `data`. -#' @param group (`string`)\cr name of the group variable in `data`. -#' @param covariates (`character vector`)\cr character vector containing the -#' name of any additional covariates +#' @param outcome string, the name of the outcome variable in `data`. +#' @param group string, the name of the group variable in `data`. +#' @param covariates character vector containing the name of any additional covariates #' to be included in the model as well as any interaction terms. #' #' @inheritParams rbmi_ancova @@ -121,17 +122,21 @@ rbmi_ancova <- function( #' `trt_*` and `lsm_*` entries. See [rbmi_ancova()] for full details. 
#' @examples #' -#' iris2 <- iris[iris$Species %in% c("versicolor", "virginica"), ] -#' iris2$Species <- factor(iris2$Species) -#' rbmi_ancova_single(iris2, "Sepal.Length", "Species", c("Petal.Length * Petal.Width")) +#' if (requireNamespace("rbmi", quietly = TRUE)) { +#' iris2 <- iris[iris$Species %in% c("versicolor", "virginica"), ] +#' iris2$Species <- factor(iris2$Species) +#' rbmi_ancova_single(iris2, "Sepal.Length", "Species", c("Petal.Length * Petal.Width")) +#' } #' #' @seealso [rbmi_ancova()] rbmi_ancova_single <- function( - data, - outcome, - group, - covariates, - weights = c("counterfactual", "equal", "proportional_em", "proportional")) { + data, + outcome, + group, + covariates, + weights = c("counterfactual", "equal", "proportional_em", "proportional") +) { + assert_rbmi() checkmate::assert_string(outcome) checkmate::assert_string(group) weights <- match.arg(weights) diff --git a/R/h_freq_funs.R b/R/h_freq_funs.R index 87c727ec..6b1f602c 100644 --- a/R/h_freq_funs.R +++ b/R/h_freq_funs.R @@ -27,6 +27,13 @@ get_ctrl_subset <- function(df, trt_var, ctrl_grp) { #' @noRd #' @keywords Internal h_create_altdf <- function(.spl_context, .df_row, denomdf, denom_by = NULL, id, variables, denom) { + colid <- .spl_context$cur_col_id[[1]] + inriskdiffcol <- grepl("difference", tolower(colid), fixed = TRUE) + if (is.null(denomdf) && denom %in% c("N_col") && length(.spl_context$split) > 1 && inriskdiffcol) { + stop("In order to get correct numbers in relative risk column, + the alt_counts_df dataset should be passed to build_table") + } + ### parent df in the current row-split (all col splits are still in) pardf <- .spl_context$full_parent_df[[NROW(.spl_context)]] diff --git a/R/mmrm_rbmi.R b/R/mmrm_rbmi.R index 2eee6ccf..817b507e 100644 --- a/R/mmrm_rbmi.R +++ b/R/mmrm_rbmi.R @@ -5,7 +5,8 @@ #' group) and the least square means estimates in each group. #' #' @param data (`data.frame`)\cr containing the data to be used in the model. 
-#' @param vars (`vars`)\cr list as generated by [rbmi::set_vars()]. Only the `subjid`, `group`, +#' @param vars (`vars`)\cr list as generated by the set_vars() function from the rbmi package. +#' Only the `subjid`, `group`, #' `visit`, `outcome` and `covariates` elements are required. See details. #' @param cov_struct (`string`)\cr the covariance structure to use. Note that the same #' covariance structure is assumed for all treatment groups. @@ -27,7 +28,8 @@ #' 3. Extract the 'treatment effect' & least square means for each treatment group #' vs the control group. #' -#' In order to meet the formatting standards set by [rbmi::analyse()] the results will be collapsed +#' In order to meet the formatting standards set by the analyse() function from the rbmi package, +#' the results will be collapsed #' into a single list suffixed by the visit name, e.g.: #' ``` #' list( @@ -48,7 +50,7 @@ #' visit, together with the degrees of freedom (which is treatment group specific). #' #' If you want to include additional interaction terms in your model this can be done -#' by providing them to the `covariates` argument of [rbmi::set_vars()] +#' by providing them to the `covariates` argument of the set_vars() function from the rbmi package #' e.g. `set_vars(covariates = c('sex*age'))`. #' #' @note The `group` and `visit` interaction `group:visit` is not included by @@ -58,15 +60,17 @@ #' #' @seealso [rbmi_analyse()] #' @seealso [mmrm::mmrm()] -#' @seealso [rbmi::set_vars()] +#' @seealso The set_vars() function from the rbmi package #' @export rbmi_mmrm <- function( - data, - vars, - cov_struct = c("us", "toep", "cs", "ar1"), - visits = NULL, - weights = c("counterfactual", "equal"), - ...) { + data, + vars, + cov_struct = c("us", "toep", "cs", "ar1"), + visits = NULL, + weights = c("counterfactual", "equal"), + ... 
+) { + assert_rbmi() subjid <- vars[["subjid"]] outcome <- vars[["outcome"]] group <- vars[["group"]] diff --git a/R/pool_rbmi.R b/R/pool_rbmi.R index 16a7173d..a0bf4a47 100644 --- a/R/pool_rbmi.R +++ b/R/pool_rbmi.R @@ -1,9 +1,9 @@ #' Pool analysis results obtained from the imputed datasets #' #' @details This has been forked from the `rbmi` package, mainly to support in -#' addition the pooling of variance estimates. See [rbmi::pool()] for more details. +#' addition the pooling of variance estimates. See `pool()` for more details. #' -#' @param results an analysis object created by [rbmi::analyse()]. +#' @param results an analysis object created by rbmi's `analyse()`. #' #' @param conf.level confidence level of the returned confidence interval. #' Must be a single number between 0 and 1. Default is 0.95. @@ -14,17 +14,20 @@ #' @param type a character string of either `"percentile"` (default) or #' `"normal"`. Determines what method should be used to calculate the bootstrap #' confidence intervals. See details. -#' Only used if `rbmi::method_condmean(type = "bootstrap")` was specified -#' in the original call to [rbmi::draws()]. +#' Only used if `method_condmean(type = "bootstrap")` was specified +#' in the original call to draws(). #' #' @returns A list of class `pool`. #' #' @export rbmi_pool <- function( - results, - conf.level = 0.95, - alternative = c("two.sided", "less", "greater"), - type = c("percentile", "normal")) { + results, + conf.level = 0.95, + alternative = c("two.sided", "less", "greater"), + type = c("percentile", "normal") +) { + assert_rbmi() + rbmi::validate(results) alternative <- match.arg(alternative) diff --git a/R/rbmi.R b/R/rbmi.R index f2b0dba9..23f95d72 100644 --- a/R/rbmi.R +++ b/R/rbmi.R @@ -47,7 +47,9 @@ find_missing_chg_after_avisit <- function(df) { visit_levels_missing <- as.integer(df[is.na(df$CHG), ]$AVISIT) # Missing visits at the end. 
- visit_levels_missing_end <- visit_levels_missing[visit_levels_missing > visit_levels_max_available] + visit_levels_missing_end <- visit_levels_missing[ + visit_levels_missing > visit_levels_max_available + ] # Return first one if there is any. if (length(visit_levels_missing_end)) { @@ -97,7 +99,14 @@ find_missing_chg_after_avisit <- function(df) { #' closeAllConnections() #' } #' @export -make_rbmi_cluster <- function(cluster_or_cores = 1, objects = NULL, packages = NULL) { # nocov start +make_rbmi_cluster <- function( + cluster_or_cores = 1, + objects = NULL, + packages = NULL +) { + # nocov start + assert_rbmi() + if (is.numeric(cluster_or_cores) && cluster_or_cores == 1) { return(NULL) } else if (is.numeric(cluster_or_cores)) { @@ -105,7 +114,10 @@ make_rbmi_cluster <- function(cluster_or_cores = 1, objects = NULL, packages = N } else if (methods::is(cluster_or_cores, "cluster")) { cl <- cluster_or_cores } else { - stop(sprintf("`cluster_or_cores` has unsupported class of: %s", paste(class(cluster_or_cores), collapse = ", "))) + stop(sprintf( + "`cluster_or_cores` has unsupported class of: %s", + paste(class(cluster_or_cores), collapse = ", ") + )) } # Load user defined objects into the globalname space @@ -115,7 +127,7 @@ make_rbmi_cluster <- function(cluster_or_cores = 1, objects = NULL, packages = N } # Load user defined packages - packages <- c(packages, "assertthat") + packages <- c(packages, "assertthat", "junco") # Remove attempts to load `rbmi` as this will be covered later packages <- setdiff(packages, "rbmi") devnull <- parallel::clusterCall( @@ -128,7 +140,10 @@ make_rbmi_cluster <- function(cluster_or_cores = 1, objects = NULL, packages = N parallel::clusterSetRNGStream(cl, sample.int(1)) # If user has previously configured `rbmi` sub-processes then early exit - exported_rbmi <- unlist(parallel::clusterEvalQ(cl, exists("..exported..parallel..rbmi"))) + exported_rbmi <- unlist(parallel::clusterEvalQ( + cl, + exists("..exported..parallel..rbmi") + 
)) if (all(exported_rbmi)) { return(cl) } @@ -178,7 +193,7 @@ par_lapply <- function(cl, fun, x, ...) { #' #' @description #' This function takes multiple imputed datasets (as generated by -#' the [rbmi::impute()] function) and runs an analysis function on +#' the impute() function from the rbmi package) and runs an analysis function on #' each of them. #' #' @importFrom assertthat assert_that @@ -199,7 +214,7 @@ par_lapply <- function(cl, fun, x, ...) { #' `fun` must return a named list with each element itself being a #' list containing a single #' numeric element called `est` (or additionally `se` and `df` if -#' you had originally specified [rbmi::method_bayes()] or [rbmi::method_approxbayes()]) +#' you had originally specified the method_bayes() or method_approxbayes() functions from the rbmi package) #' i.e.: #' \preformatted{ #' myfun <- function(dat, ...) { @@ -222,17 +237,17 @@ par_lapply <- function(cl, fun, x, ...) { #' } #' #' Please note that the `vars$subjid` column (as defined in the original call to -#' [rbmi::draws()]) will be scrambled in the data.frames that are provided to `fun`. +#' the draws() function from the rbmi package) will be scrambled in the data.frames that are provided to `fun`. #' This is to say they will not contain the original subject values and as such #' any hard coding of subject ids is strictly to be avoided. #' #' By default `fun` is the [rbmi_ancova()] function. #' Please note that this function -#' requires that a `vars` object, as created by [rbmi::set_vars()], is provided via -#' the `vars` argument e.g. `rbmi_analyse(imputeObj, vars = rbmi::set_vars(...))`. Please +#' requires that a `vars` object, as created by the set_vars() function from the rbmi package, is provided via +#' the `vars` argument e.g. `rbmi_analyse(imputeObj, vars = set_vars(...))`. Please #' see the documentation for [rbmi_ancova()] for full details. 
#' Please also note that the theoretical justification for the conditional mean imputation -#' method (`method = method_condmean()` in [rbmi::draws()]) relies on the fact that ANCOVA is +#' method (`method = method_condmean()` in the draws() function from the rbmi package) relies on the fact that ANCOVA is #' a linear transformation of the outcomes. #' Thus care is required when applying alternative analysis functions in this setting. #' @@ -240,7 +255,7 @@ par_lapply <- function(cl, fun, x, ...) { #' to the outcome variable in the imputed datasets prior to the analysis. #' This is typically used for sensitivity or tipping point analyses. The #' delta dataset must contain columns `vars$subjid`, `vars$visit` (as specified -#' in the original call to [rbmi::draws()]) and `delta`. Essentially this `data.frame` +#' in the original call to the draws() function from the rbmi package) and `delta`. Essentially this `data.frame` #' is merged onto the imputed dataset by `vars$subjid` and `vars$visit` and then #' the outcome variable is modified by: #' @@ -251,21 +266,18 @@ par_lapply <- function(cl, fun, x, ...) { #' Please note that in order to provide maximum flexibility, the `delta` argument #' can be used to modify any/all outcome values including those that were not #' imputed. Care must be taken when defining offsets. It is recommend that you -#' use the helper function [rbmi::delta_template()] to define the delta datasets as +#' use the helper function delta_template() from the rbmi package to define the delta datasets as #' this provides utility variables such as `is_missing` which can be used to identify #' exactly which visits have been imputed. #' -#' @seealso [rbmi::extract_imputed_dfs()] for manually extracting imputed +#' @seealso The extract_imputed_dfs() function from the rbmi package for manually extracting imputed #' datasets. -#' @seealso [rbmi::delta_template()] for creating delta data.frames. 
+#' @seealso The delta_template() function from the rbmi package for creating delta data.frames. #' @seealso [rbmi_ancova()] for the default analysis function. #' -#' @param imputations (`imputations`)\cr -#' An `imputations` object as created by [rbmi::impute()]. -#' @param fun (`function`)\cr -#' An analysis function to be applied to each imputed dataset. See details. -#' @param delta (`data.frame`)\cr -#' A `data.frame` containing the delta transformation to be applied to the imputed +#' @param imputations An `imputations` object as created by the impute() function from the rbmi package. +#' @param fun An analysis function to be applied to each imputed dataset. See details. +#' @param delta A `data.frame` containing the delta transformation to be applied to the imputed #' datasets prior to running `fun`. See details. #' @param ... Additional arguments passed onto `fun`. #' @param cluster_or_cores (`numeric` or `cluster object`)\cr @@ -300,12 +312,14 @@ par_lapply <- function(cl, fun, x, ...) { #' Note that there is significant overhead both with setting up the sub-processes and with #' transferring data back-and-forth between the main process and the sub-processes. As such #' parallelisation of the `rbmi_analyse()` function tends to only be worth it when you have -#' `> 2000` samples generated by [rbmi::draws()]. Conversely using parallelisation if your samples +#' `> 2000` samples generated by the draws() function from the rbmi package. +#' Conversely using parallelisation if your samples #' are smaller than this may lead to longer run times than just running it sequentially. #' -#' It is important to note that the implementation of parallel processing within [rbmi::analyse()`] has -#' been optimised around the assumption that the parallel processes will be spawned on the same -#' machine and not a remote cluster. 
One such optimisation is that the required data is saved to +#' It is important to note that the implementation of parallel processing within the analyse() +#' function from the rbmi package has been optimised around the assumption that the parallel +#' processes will be spawned on the same machine and not a remote cluster. +#' One such optimisation is that the required data is saved to #' a temporary file on the local disk from which it is then read into each sub-process. This is #' done to avoid the overhead of transferring the data over the network. Our assumption is that #' if you are at the stage where you need to be parallelising your analysis over a remote cluster @@ -337,71 +351,87 @@ par_lapply <- function(cl, fun, x, ...) { #' @return An `analysis` object, as defined by `rbmi`, representing the desired #' analysis applied to each of the imputed datasets in `imputations`. #' @examples -#' library(rbmi) -#' library(dplyr) -#' -#' dat <- antidepressant_data -#' dat$GENDER <- as.factor(dat$GENDER) -#' dat$POOLINV <- as.factor(dat$POOLINV) -#' set.seed(123) -#' pat_ids <- sample(levels(dat$PATIENT), nlevels(dat$PATIENT) / 4) -#' dat <- dat |> -#' filter(PATIENT %in% pat_ids) |> -#' droplevels() -#' dat <- expand_locf( -#' dat, -#' PATIENT = levels(dat$PATIENT), -#' VISIT = levels(dat$VISIT), -#' vars = c("BASVAL", "THERAPY"), -#' group = c("PATIENT"), -#' order = c("PATIENT", "VISIT") -#' ) -#' dat_ice <- dat |> -#' arrange(PATIENT, VISIT) |> -#' filter(is.na(CHANGE)) |> -#' group_by(PATIENT) |> -#' slice(1) |> -#' ungroup() |> -#' select(PATIENT, VISIT) |> -#' mutate(strategy = "JR") -#' dat_ice <- dat_ice[-which(dat_ice$PATIENT == 3618), ] -#' vars <- set_vars( -#' outcome = "CHANGE", -#' visit = "VISIT", -#' subjid = "PATIENT", -#' group = "THERAPY", -#' covariates = c("THERAPY") -#' ) -#' drawObj <- draws( -#' data = dat, -#' data_ice = dat_ice, -#' vars = vars, -#' method = method_condmean(type = "jackknife", covariance = "csh"), -#' quiet = TRUE -#' ) 
-#' references <- c("DRUG" = "PLACEBO", "PLACEBO" = "PLACEBO") -#' imputeObj <- impute(drawObj, references) -#' -#' rbmi_analyse(imputations = imputeObj, vars = vars) +#' if (requireNamespace("rbmi", quietly = TRUE)) { +#' library(rbmi) +#' library(dplyr) +#' +#' dat <- antidepressant_data +#' dat$GENDER <- as.factor(dat$GENDER) +#' dat$POOLINV <- as.factor(dat$POOLINV) +#' set.seed(123) +#' pat_ids <- sample(levels(dat$PATIENT), nlevels(dat$PATIENT) / 4) +#' dat <- dat |> +#' filter(PATIENT %in% pat_ids) |> +#' droplevels() +#' dat <- expand_locf( +#' dat, +#' PATIENT = levels(dat$PATIENT), +#' VISIT = levels(dat$VISIT), +#' vars = c("BASVAL", "THERAPY"), +#' group = c("PATIENT"), +#' order = c("PATIENT", "VISIT") +#' ) +#' dat_ice <- dat |> +#' arrange(PATIENT, VISIT) |> +#' filter(is.na(CHANGE)) |> +#' group_by(PATIENT) |> +#' slice(1) |> +#' ungroup() |> +#' select(PATIENT, VISIT) |> +#' mutate(strategy = "JR") +#' dat_ice <- dat_ice[-which(dat_ice$PATIENT == 3618), ] +#' vars <- set_vars( +#' outcome = "CHANGE", +#' visit = "VISIT", +#' subjid = "PATIENT", +#' group = "THERAPY", +#' covariates = c("THERAPY") +#' ) +#' drawObj <- draws( +#' data = dat, +#' data_ice = dat_ice, +#' vars = vars, +#' method = method_condmean(type = "jackknife", covariance = "csh"), +#' quiet = TRUE +#' ) +#' references <- c("DRUG" = "PLACEBO", "PLACEBO" = "PLACEBO") +#' imputeObj <- impute(drawObj, references) +#' +#' rbmi_analyse(imputations = imputeObj, vars = vars) +#' } #' @export -rbmi_analyse <- function(imputations, fun = rbmi_ancova, delta = NULL, ..., cluster_or_cores = 1, .validate = TRUE) { - # nocov - - if (.validate) rbmi::validate(imputations) - +rbmi_analyse <- function( + imputations, + fun = rbmi_ancova, + delta = NULL, + ..., + cluster_or_cores = 1, + .validate = TRUE +) { + # nocov start + assert_rbmi() + if (.validate) { + rbmi::validate(imputations) + } assertthat::assert_that(is.function(fun), msg = "`fun` must be a function") - - 
assertthat::assert_that(is.null(delta) | is.data.frame(delta), msg = "`delta` must be NULL or a data.frame") - + assertthat::assert_that( + is.null(delta) | is.data.frame(delta), + msg = "`delta` must be NULL or a data.frame" + ) vars <- imputations$data$vars - if (.validate) devnull <- lapply(imputations$imputations, function(x) rbmi::validate(x)) + if (.validate) { + devnull <- lapply(imputations$imputations, function(x) rbmi::validate(x)) + } if (!is.null(delta)) { expected_vars <- c(vars$subjid, vars$visit, "delta") assertthat::assert_that( all(expected_vars %in% names(delta)), - msg = sprintf("The following variables must exist witin `delta`: `%s`", paste0(expected_vars, collapse = "`, `")) + msg = sprintf( + "The following variables must exist within `delta`: `%s`", + paste0(expected_vars, collapse = "`, `") + ) ) } @@ -420,7 +450,11 @@ rbmi_analyse <- function(imputations, fun = rbmi_ancova, delta = NULL, ..., clus if (methods::is(cl, "cluster")) { ..rbmi..analysis..data..path <- tempfile() saveRDS(objects, file = ..rbmi..analysis..data..path, compress = FALSE) - devnull <- parallel::clusterExport(cl, "..rbmi..analysis..data..path", environment()) + devnull <- parallel::clusterExport( + cl, + "..rbmi..analysis..data..path", + environment() + ) devnull <- parallel::clusterEvalQ(cl, { ..rbmi..analysis..objects <- readRDS(..rbmi..analysis..data..path) list2env(..rbmi..analysis..objects, envir = environment()) @@ -481,4 +515,5 @@ rbmi_analyse <- function(imputations, fun = rbmi_ancova, delta = NULL, ..., clus ) rbmi::validate(ret) return(ret) + # nocov end } diff --git a/R/sorting_functions.R b/R/sorting_functions.R index de86f518..3e9eb6d3 100644 --- a/R/sorting_functions.R +++ b/R/sorting_functions.R @@ -30,6 +30,7 @@ #' @returns A function which can be used as a score function (scorefun in `sort_at_path`).
# @examples #result <- sort_at_path(result, c('root', 'AEBODSYS'), scorefun = jj_complex_scorefun()) #' @examples +#' library(dplyr) #' ADAE <- data.frame( #' USUBJID = c( #' "XXXXX01", "XXXXX02", "XXXXX03", "XXXXX04", "XXXXX05", @@ -71,6 +72,8 @@ #' #' ref_path <- c("colspan_trt", " ", "TRT01A", "Placebo") #' +#' ADSL <- unique(ADAE |> select(USUBJID, "colspan_trt", "rrisk_header", "rrisk_label", "TRT01A")) +#' #' lyt <- basic_table() |> #' split_cols_by( #' "colspan_trt", @@ -120,7 +123,7 @@ #' ) #' ) #' -#' result <- build_table(lyt, ADAE) +#' result <- build_table(lyt, ADAE, alt_counts_df = ADSL) #' #' result #' @@ -140,11 +143,12 @@ #' @rdname complex_scoring_function #' @aliases jj_complex_scorefun jj_complex_scorefun <- function( - spanningheadercolvar = "colspan_trt", - usefirstcol = FALSE, - colpath = NULL, - firstcat = NULL, - lastcat = NULL) { + spanningheadercolvar = "colspan_trt", + usefirstcol = FALSE, + colpath = NULL, + firstcat = NULL, + lastcat = NULL +) { paths <- NULL function(tt) { diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 482fa3a8..8b43e07d 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -4,19 +4,36 @@ #' @param fontspec (`font_spec`)\cr Font specification object #' @param string_map (`list`)\cr Unicode mapping for special characters #' @param markup_df (`data.frame`)\cr Data frame containing markup information +#' @param validate logical(1). Whether to validate the table structure using +#' `rtables::validate_table_struct()`. Defaults to `TRUE`. If `FALSE`, a message +#' will be displayed instead of stopping with an error when validation fails. #' @return `tt` represented as a `tbl` data.frame suitable for passing #' to [tidytlg::gentlg] via the `huxme` argument. tt_to_tbldf <- function( tt, fontspec = font_spec("Times", 9L, 1), string_map = default_str_map, - markup_df = dps_markup_df + markup_df = dps_markup_df, + validate = TRUE ) { - if (!validate_table_struct(tt)) { - stop( - "invalid table structure. 
summarize_row_groups without ", - "analyze below it in layout structure?" - ) + if (validate) { + if (!validate_table_struct(tt)) { + stop( + "Invalid table structure detected. summarize_row_groups without ", + "analyze below it in layout structure?" + ) + } + } else { + if (!validate_table_struct(tt)) { + message( + "Invalid table structure detected. This may cause issues in the output. ", + "The validation process failed, proceed with caution." + ) + } else { + message( + "Table structure validation succeeded. You should not need to set validate=FALSE." + ) + } } mpf <- matrix_form( tt, @@ -256,8 +273,8 @@ get_ncol <- function(tt) { #' sas performs nearest-value rounding consistent with rounding within SAS. #' See `[formatters::format_value()]` for more details. #' @param validate logical(1). Whether to validate the table structure using -#' `rtables::validate_table_struct()`. Defaults to `TRUE`. This can also be disabled -#' globally by setting the environment variable `JUNCO_DISABLE_VALIDATION=TRUE`. +#' `rtables::validate_table_struct()`. Defaults to `TRUE`. If `FALSE`, a message +#' will be displayed when validation fails. #' @import rlistings #' @rdname tt_to_tlgrtf #' @export @@ -306,17 +323,20 @@ tt_to_tlgrtf <- function( validate = TRUE, ... ) { - # Validate table structure if requested and not disabled by environment variable - # nolint start - if (validate && tlgtype == "Table" && methods::is(tt, "VTableTree") && - Sys.getenv("JUNCO_DISABLE_VALIDATION") != "TRUE") { + if (validate && tlgtype == "Table" && methods::is(tt, "VTableTree")) { if (!rtables::validate_table_struct(tt)) { - warning( + message( "Invalid table structure detected. This may cause issues in the output. ", - "Use validate=FALSE to disable this warning or set JUNCO_DISABLE_VALIDATION=TRUE in your environment." + "The validation process failed, proceed with caution." 
) } - } # nolint end + } else if (!validate && tlgtype == "Table" && methods::is(tt, "VTableTree")) { + if (rtables::validate_table_struct(tt)) { + message( + "Table structure validation succeeded. You should not need to set validate=FALSE." + ) + } + } orientation <- match.arg(orientation) newdev <- open_font_dev(fontspec) diff --git a/R/utils.R b/R/utils.R index d987f8cf..e02bf7cc 100644 --- a/R/utils.R +++ b/R/utils.R @@ -71,3 +71,18 @@ check_alt_df_full <- function(argument, values, .alt_df_full) { name, argument )) } + + +#' Assert that rbmi package is installed +#' +#' @description +#' Checks if the 'rbmi' package is installed and stops with an error message if it's not. +#' +#' @return Invisible NULL if rbmi is installed +#' @keywords internal +assert_rbmi <- function() { + if (!requireNamespace("rbmi", quietly = TRUE)) { + stop("The 'rbmi' package is needed for this function to work. Please install it.", call. = FALSE) + } + invisible(NULL) +} diff --git a/_pkgdown.yml b/_pkgdown.yml index 2f3aae40..2e4668be 100644 --- a/_pkgdown.yml +++ b/_pkgdown.yml @@ -39,6 +39,7 @@ reference: - a_proportion_ci_logical - a_summarize_aval_chg_diff_j - a_maxlev + - a_two_tier - rbmi_analyse - analyze_values - rbmi_ancova diff --git a/cran-comments.md b/cran-comments.md new file mode 100644 index 00000000..b05f1218 --- /dev/null +++ b/cran-comments.md @@ -0,0 +1,45 @@ +## R CMD check results + +❯ checking package dependencies ... ERROR + Package suggested but not available: 'rbmi' + + The suggested packages are required for a complete check. + Checking can be attempted without them by setting the environment + variable _R_CHECK_FORCE_SUGGESTS_ to a false value. + + See section 'The DESCRIPTION file' in the 'Writing R Extensions' + manual. + +❯ checking CRAN incoming feasibility ... 
[35s] NOTE + Maintainer: 'Gabriel Becker <gabembecker@gmail.com>' + + New submission + + Package was archived on CRAN + + CRAN repository db overrides: + X-CRAN-Comment: Archived on 2025-11-19 as requires archived package + 'rbmi'. + + Suggests or Enhances not in mainstream repositories: + rbmi + Availability using Additional_repositories specification: + rbmi yes https://insightsengineering.r-universe.dev/ + +1 error ✖ | 0 warnings ✔ | 1 note ✖ + +* We've moved 'rbmi' to Suggests and the package now works without it. + +## Test environments + +* local Windows install, R 4.5.0 +* ubuntu 22.04 (on GitHub Actions), R 4.5.0 +* win-builder (devel) + +## Downstream dependencies + +There are currently no downstream dependencies for this package. + +## Changes in this version + +* Moved 'rbmi' from 'Imports' to 'Suggests' and made package fully functional without it diff --git a/inst/WORDLIST b/inst/WORDLIST index 4a0e8bee..ca8d1b64 100644 --- a/inst/WORDLIST +++ b/inst/WORDLIST @@ -223,4 +223,13 @@ allparts pseudocolumn repo rlistings -tbl \ No newline at end of file +tbl +eq +fn +jj +pharmaverseadam +pharmaverseadamjnj +postfun +uc +unicodify +rbmi's \ No newline at end of file diff --git a/man/a_maxlev.Rd b/man/a_maxlev.Rd index e318a281..2f562eb1 100644 --- a/man/a_maxlev.Rd +++ b/man/a_maxlev.Rd @@ -11,7 +11,7 @@ a_maxlev( .var, .spl_context, id = "USUBJID", - denom_df, + .alt_df_full = NULL, any_level = FALSE, any_level_exclude = "Missing", ... @@ -32,10 +32,12 @@ that is passed by \code{rtables}.} \item{id}{(\code{string})\cr subject variable name.} -\item{denom_df}{(\code{data.frame})\cr -A dataset used to compute the denominator for proportions. Required when -the same subject appears multiple times in the dataset due to treatment -sequences.} +\item{.alt_df_full}{(\code{dataframe})\cr A dataset used to compute the +denominator for proportions. This is required when the same subject appears +multiple times in the dataset due to treatment sequences. 
+\code{colnames(.alt_df_full)} must be a superset of \code{id}. +This argument gets populated by the rtables split machinery +(see \link[rtables:additional_fun_params]{rtables::additional_fun_params}).} \item{any_level}{(\code{flag})\cr Should be set to \code{TRUE} when the function is used as a \code{cfun}.} @@ -103,12 +105,12 @@ lyt <- basic_table() |> summarize_row_groups( "AESEV", cfun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl, any_level = TRUE) + extra_args = list(id = "ID", any_level = TRUE) ) |> analyze( "AESEV", afun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl) + extra_args = list(id = "ID") ) -build_table(lyt, my_adae) +build_table(lyt, my_adae, alt_counts_df = my_adsl) } diff --git a/man/a_two_tier.Rd b/man/a_two_tier.Rd new file mode 100644 index 00000000..49121d65 --- /dev/null +++ b/man/a_two_tier.Rd @@ -0,0 +1,197 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/a_two_tier.R +\name{a_two_tier} +\alias{a_two_tier} +\title{Two Tier Analysis Function} +\usage{ +a_two_tier( + df, + labelstr = NULL, + .var, + .N_col, + .df_row, + inner_var, + drill_down_levs, + .spl_context, + use_all_levels = FALSE, + grp_fun, + detail_fun, + .alt_df_full = NULL, + ... +) +} +\arguments{ +\item{df}{(\code{data.frame})\cr data set containing all analysis variables.} + +\item{labelstr}{(\code{character})\cr label of the level of the parent split currently being summarized +(must be present as second argument in Content Row Functions). 
See \code{\link[rtables:summarize_row_groups]{rtables::summarize_row_groups()}} +for more information.} + +\item{.var}{(\code{string})\cr single variable name that is passed by \code{rtables} when requested +by a statistics function.} + +\item{.N_col}{(\code{integer})\cr column-wise N (column count) for the full column being analyzed that is typically +passed by \code{rtables}.} + +\item{.df_row}{(\code{data.frame})\cr data frame across all of the columns for the given row split.} + +\item{inner_var}{(\code{string})\cr single variable name to use when generating +the detail rows.} + +\item{drill_down_levs}{(\code{character})\cr the level(s) of \code{.var} under which +detail rows should be generated.} + +\item{.spl_context}{(\code{data.frame})\cr gives information about ancestor split states +that is passed by \code{rtables}.} + +\item{use_all_levels}{(\code{flag})\cr controls which factor levels will be +present for \code{inner_var} (both in \code{df}/\code{x} and in \code{.df_row}) when calling +\code{detail_fun}. +If \code{TRUE}, all levels (those present on the factor \code{.df_row[[inner_var]]}, +*regardless if the level is observed in the row group or not) will be +present when creating detail rows. 
+Otherwise (the default), only the levels observed +\emph{anywhere in the row group, i.e., within \code{.df_row}} will be present.} + +\item{grp_fun}{(\code{function})\cr analysis function to be used when generating +the "group summary" outer rows.} + +\item{detail_fun}{(\code{function})\cr analysis function to be used when generating +"detail" inner rows.} + +\item{.alt_df_full}{(\code{dataframe})\cr denominator dataset for fraction and +relative risk calculations.\cr +this argument gets populated by the rtables split machinery +(see \link[rtables:additional_fun_params]{rtables::additional_fun_params}).} + +\item{...}{additional arguments passed directly to \code{grp_fun} and \code{detail_fun}.} +} +\value{ +A \code{RowsVerticalSection} object including both the group row and all +detail rows, if applicable, for the facet. +} +\description{ +The analysis function used as an \code{afun} in \link[rtables]{analyze}. +This function simulates a final additional level of nesting with a +traditional analyze call inside it. + +This makes it possible to create what \emph{appear to be} group summary or +content rows and to \emph{optionally or conditionally} generate one or more +"detail" rows underneath it. + +For example, in a disposition table, one might want counts for completed +and ongoing patients with no further detail underneath, but a breakdown of +specific reasons beneath the count of patients who discontinued treatment. +} +\details{ +Both the analysis variable and \code{inner_var} must be factors. +Detail rows are differentiated by having an indent mod of one, causing them +to hang indented under their corresponding group row. +} +\note{ +In its current form, this function may not retain any formatting or +labeling instructions added by \code{grp_fun} or \code{detail_fun}, and it will +override any \code{.indent_mods} values specified by them. This behavior may +change in future versions. 
+} +\examples{ + +# Example 1 + +lyt_obs_levels <- basic_table() |> + split_cols_by("ARM") |> + split_rows_by("EOSSTT", child_labels = "hidden") |> + analyze("EOSSTT", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "DCSREAS", + drill_down_levs = "DISCONTINUED" + ) + ) + +tbl <- build_table(lyt_obs_levels, ex_adsl) +tbl + +lyt_all_levels <- basic_table() |> + split_cols_by("ARM") |> + split_rows_by("EOSSTT", child_labels = "hidden") |> + analyze("EOSSTT", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "DCSREAS", + drill_down_levs = "DISCONTINUED", + use_all_levels = TRUE + ) + ) + +adsl_subset <- subset(ex_adsl, DCSREAS != "ADVERSE EVENT") +levels(adsl_subset$DCSREAS) + +tbl_all_levels <- build_table(lyt_all_levels, adsl_subset) +tbl_all_levels + +tbl_obs_levels <- build_table(lyt_obs_levels, adsl_subset) +tbl_obs_levels + +# Example 2 + +library(dplyr) + +trtvar <- "ARM" +ctrl_grp <- "B: Placebo" + +adsl <- ex_adsl |> select(c("USUBJID", "STRATA1", "EOSSTT", "DCSREAS", all_of(trtvar))) +adsl$colspan_trt <- factor( + ifelse(adsl[[trtvar]] == ctrl_grp, " ", "Active Study Agent"), + levels = c("Active Study Agent", " ") +) +adsl$rrisk_header <- "Risk Difference (\%) (95\% CI)" +adsl$rrisk_label <- paste(adsl[[trtvar]], paste("vs", ctrl_grp)) + +colspan_trt_map <- create_colspan_map( + df = adsl, + non_active_grp = ctrl_grp, + non_active_grp_span_lbl = " ", + active_grp_span_lbl = "Active Study Agent", + colspan_var = "colspan_trt", + trt_var = trtvar +) + +a_freq_j_args <- list( + .stats = "count_unique_fraction", + denom = "n_altdf", + ref_path = c("colspan_trt", " ", trtvar, ctrl_grp) +) + +two_tier_args <- list( + grp_fun = a_freq_j, + detail_fun = a_freq_j, + inner_var = "DCSREAS", + drill_down_levs = "DISCONTINUED" +) + +lyt_rrisk <- basic_table() |> + split_cols_by("colspan_trt", split_fun = trim_levels_to_map(map = 
colspan_trt_map)) |> + split_cols_by(trtvar) |> + split_cols_by("rrisk_header", nested = FALSE) |> + split_cols_by(trtvar, labels_var = "rrisk_label", split_fun = remove_split_levels(ctrl_grp)) |> + split_rows_by("STRATA1") |> + split_rows_by("EOSSTT", child_labels = "hidden") |> + analyze("EOSSTT", afun = a_two_tier, extra_args = c(two_tier_args, a_freq_j_args)) + +adsl_subset <- subset( + adsl, + EOSSTT != "COMPLETED" & (is.na(DCSREAS) | DCSREAS != "PROTOCOL VIOLATION") +) + +tbl_rrisk <- build_table(lyt_rrisk, adsl_subset, alt_counts_df = adsl_subset) +tbl_rrisk + +} +\author{ +GB, WW. +} diff --git a/man/assert_rbmi.Rd b/man/assert_rbmi.Rd new file mode 100644 index 00000000..93a5609d --- /dev/null +++ b/man/assert_rbmi.Rd @@ -0,0 +1,15 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/utils.R +\name{assert_rbmi} +\alias{assert_rbmi} +\title{Assert that rbmi package is installed} +\usage{ +assert_rbmi() +} +\value{ +Invisible NULL if rbmi is installed +} +\description{ +Checks if the 'rbmi' package is installed and stops with an error message if it's not. +} +\keyword{internal} diff --git a/man/complex_scoring_function.Rd b/man/complex_scoring_function.Rd index bd82cd12..52cad397 100644 --- a/man/complex_scoring_function.Rd +++ b/man/complex_scoring_function.Rd @@ -51,6 +51,7 @@ This function is not really designed for tables that have sub-columns. 
However, default sorting behavior, they can simply specify their own colpath to use for sorting on (default = NULL) } \examples{ +library(dplyr) ADAE <- data.frame( USUBJID = c( "XXXXX01", "XXXXX02", "XXXXX03", "XXXXX04", "XXXXX05", @@ -92,6 +93,8 @@ colspan_trt_map <- create_colspan_map(ADAE, ref_path <- c("colspan_trt", " ", "TRT01A", "Placebo") +ADSL <- unique(ADAE |> select(USUBJID, "colspan_trt", "rrisk_header", "rrisk_label", "TRT01A")) + lyt <- basic_table() |> split_cols_by( "colspan_trt", @@ -141,7 +144,7 @@ lyt <- basic_table() |> ) ) -result <- build_table(lyt, ADAE) +result <- build_table(lyt, ADAE, alt_counts_df = ADSL) result diff --git a/man/rbmi_analyse.Rd b/man/rbmi_analyse.Rd index 5f3d8613..df8ce8f9 100644 --- a/man/rbmi_analyse.Rd +++ b/man/rbmi_analyse.Rd @@ -14,14 +14,11 @@ rbmi_analyse( ) } \arguments{ -\item{imputations}{(\code{imputations})\cr -An \code{imputations} object as created by \code{\link[rbmi:impute]{rbmi::impute()}}.} +\item{imputations}{An \code{imputations} object as created by the impute() function from the rbmi package.} -\item{fun}{(\code{function})\cr -An analysis function to be applied to each imputed dataset. See details.} +\item{fun}{An analysis function to be applied to each imputed dataset. See details.} -\item{delta}{(\code{data.frame})\cr -A \code{data.frame} containing the delta transformation to be applied to the imputed +\item{delta}{A \code{data.frame} containing the delta transformation to be applied to the imputed datasets prior to running \code{fun}. See details.} \item{...}{Additional arguments passed onto \code{fun}.} @@ -41,7 +38,7 @@ analysis applied to each of the imputed datasets in \code{imputations}. } \description{ This function takes multiple imputed datasets (as generated by -the \code{\link[rbmi:impute]{rbmi::impute()}} function) and runs an analysis function on +the impute() function from the rbmi package) and runs an analysis function on each of them. 
} \details{ @@ -61,7 +58,7 @@ via \code{...}. \code{fun} must return a named list with each element itself being a list containing a single numeric element called \code{est} (or additionally \code{se} and \code{df} if -you had originally specified \code{\link[rbmi:method_bayes]{rbmi::method_bayes()}} or \code{\link[rbmi:method_approxbayes]{rbmi::method_approxbayes()}}) +you had originally specified the method_bayes() or method_approxbayes() functions from the rbmi package) i.e.: \preformatted{ myfun <- function(dat, ...) { @@ -84,17 +81,17 @@ myfun <- function(dat, ...) { } Please note that the \code{vars$subjid} column (as defined in the original call to -\code{\link[rbmi:draws]{rbmi::draws()}}) will be scrambled in the data.frames that are provided to \code{fun}. +the draws() function from the rbmi package) will be scrambled in the data.frames that are provided to \code{fun}. This is to say they will not contain the original subject values and as such any hard coding of subject ids is strictly to be avoided. By default \code{fun} is the \code{\link[=rbmi_ancova]{rbmi_ancova()}} function. Please note that this function -requires that a \code{vars} object, as created by \code{\link[rbmi:set_vars]{rbmi::set_vars()}}, is provided via -the \code{vars} argument e.g. \code{rbmi_analyse(imputeObj, vars = rbmi::set_vars(...))}. Please +requires that a \code{vars} object, as created by the set_vars() function from the rbmi package, is provided via +the \code{vars} argument e.g. \code{rbmi_analyse(imputeObj, vars = set_vars(...))}. Please see the documentation for \code{\link[=rbmi_ancova]{rbmi_ancova()}} for full details. 
Please also note that the theoretical justification for the conditional mean imputation -method (\code{method = method_condmean()} in \code{\link[rbmi:draws]{rbmi::draws()}}) relies on the fact that ANCOVA is +method (\code{method = method_condmean()} in the draws() function from the rbmi package) relies on the fact that ANCOVA is a linear transformation of the outcomes. Thus care is required when applying alternative analysis functions in this setting. @@ -102,7 +99,7 @@ The \code{delta} argument can be used to specify offsets to be applied to the outcome variable in the imputed datasets prior to the analysis. This is typically used for sensitivity or tipping point analyses. The delta dataset must contain columns \code{vars$subjid}, \code{vars$visit} (as specified -in the original call to \code{\link[rbmi:draws]{rbmi::draws()}}) and \code{delta}. Essentially this \code{data.frame} +in the original call to the draws() function from the rbmi package) and \code{delta}. Essentially this \code{data.frame} is merged onto the imputed dataset by \code{vars$subjid} and \code{vars$visit} and then the outcome variable is modified by: @@ -112,7 +109,7 @@ the outcome variable is modified by: Please note that in order to provide maximum flexibility, the \code{delta} argument can be used to modify any/all outcome values including those that were not imputed. Care must be taken when defining offsets. It is recommend that you -use the helper function \code{\link[rbmi:delta_template]{rbmi::delta_template()}} to define the delta datasets as +use the helper function delta_template() from the rbmi package to define the delta datasets as this provides utility variables such as \code{is_missing} which can be used to identify exactly which visits have been imputed. } @@ -141,10 +138,19 @@ parallel::stopCluster(cl) Note that there is significant overhead both with setting up the sub-processes and with transferring data back-and-forth between the main process and the sub-processes. 
As such parallelisation of the \code{rbmi_analyse()} function tends to only be worth it when you have -\verb{> 2000} samples generated by \code{\link[rbmi:draws]{rbmi::draws()}}. Conversely using parallelisation if your samples +\verb{> 2000} samples generated by the draws() function from the rbmi package. +Conversely using parallelisation if your samples are smaller than this may lead to longer run times than just running it sequentially. -It is important to note that the implementation of parallel processing within [rbmi::analyse()\verb{] has been optimised around the assumption that the parallel processes will be spawned on the same machine and not a remote cluster. One such optimisation is that the required data is saved to a temporary file on the local disk from which it is then read into each sub-process. This is done to avoid the overhead of transferring the data over the network. Our assumption is that if you are at the stage where you need to be parallelising your analysis over a remote cluster then you would likely be better off parallelising across multiple }rbmi\verb{runs rather than within a single}rbmi` run. +It is important to note that the implementation of parallel processing within the analyse() +function from the rbmi package has been optimised around the assumption that the parallel +processes will be spawned on the same machine and not a remote cluster. +One such optimisation is that the required data is saved to +a temporary file on the local disk from which it is then read into each sub-process. This is +done to avoid the overhead of transferring the data over the network. Our assumption is that +if you are at the stage where you need to be parallelising your analysis over a remote cluster +then you would likely be better off parallelising across multiple \code{rbmi} runs rather than within +a single \code{rbmi} run. 
Finally, if you are doing a tipping point analysis you can get a reasonable performance improvement by re-using the cluster between each call to \code{rbmi_analyse()} e.g. @@ -170,58 +176,60 @@ parallel::clusterStop(cl) } \examples{ -library(rbmi) -library(dplyr) - -dat <- antidepressant_data -dat$GENDER <- as.factor(dat$GENDER) -dat$POOLINV <- as.factor(dat$POOLINV) -set.seed(123) -pat_ids <- sample(levels(dat$PATIENT), nlevels(dat$PATIENT) / 4) -dat <- dat |> - filter(PATIENT \%in\% pat_ids) |> - droplevels() -dat <- expand_locf( - dat, - PATIENT = levels(dat$PATIENT), - VISIT = levels(dat$VISIT), - vars = c("BASVAL", "THERAPY"), - group = c("PATIENT"), - order = c("PATIENT", "VISIT") -) -dat_ice <- dat |> - arrange(PATIENT, VISIT) |> - filter(is.na(CHANGE)) |> - group_by(PATIENT) |> - slice(1) |> - ungroup() |> - select(PATIENT, VISIT) |> - mutate(strategy = "JR") -dat_ice <- dat_ice[-which(dat_ice$PATIENT == 3618), ] -vars <- set_vars( - outcome = "CHANGE", - visit = "VISIT", - subjid = "PATIENT", - group = "THERAPY", - covariates = c("THERAPY") -) -drawObj <- draws( - data = dat, - data_ice = dat_ice, - vars = vars, - method = method_condmean(type = "jackknife", covariance = "csh"), - quiet = TRUE -) -references <- c("DRUG" = "PLACEBO", "PLACEBO" = "PLACEBO") -imputeObj <- impute(drawObj, references) - -rbmi_analyse(imputations = imputeObj, vars = vars) +if (requireNamespace("rbmi", quietly = TRUE)) { + library(rbmi) + library(dplyr) + + dat <- antidepressant_data + dat$GENDER <- as.factor(dat$GENDER) + dat$POOLINV <- as.factor(dat$POOLINV) + set.seed(123) + pat_ids <- sample(levels(dat$PATIENT), nlevels(dat$PATIENT) / 4) + dat <- dat |> + filter(PATIENT \%in\% pat_ids) |> + droplevels() + dat <- expand_locf( + dat, + PATIENT = levels(dat$PATIENT), + VISIT = levels(dat$VISIT), + vars = c("BASVAL", "THERAPY"), + group = c("PATIENT"), + order = c("PATIENT", "VISIT") + ) + dat_ice <- dat |> + arrange(PATIENT, VISIT) |> + filter(is.na(CHANGE)) |> + 
group_by(PATIENT) |> + slice(1) |> + ungroup() |> + select(PATIENT, VISIT) |> + mutate(strategy = "JR") + dat_ice <- dat_ice[-which(dat_ice$PATIENT == 3618), ] + vars <- set_vars( + outcome = "CHANGE", + visit = "VISIT", + subjid = "PATIENT", + group = "THERAPY", + covariates = c("THERAPY") + ) + drawObj <- draws( + data = dat, + data_ice = dat_ice, + vars = vars, + method = method_condmean(type = "jackknife", covariance = "csh"), + quiet = TRUE + ) + references <- c("DRUG" = "PLACEBO", "PLACEBO" = "PLACEBO") + imputeObj <- impute(drawObj, references) + + rbmi_analyse(imputations = imputeObj, vars = vars) +} } \seealso{ -\code{\link[rbmi:extract_imputed_dfs]{rbmi::extract_imputed_dfs()}} for manually extracting imputed +The extract_imputed_dfs() function from the rbmi package for manually extracting imputed datasets. -\code{\link[rbmi:delta_template]{rbmi::delta_template()}} for creating delta data.frames. +The delta_template() function from the rbmi package for creating delta data.frames. \code{\link[=rbmi_ancova]{rbmi_ancova()}} for the default analysis function. } diff --git a/man/rbmi_ancova.Rd b/man/rbmi_ancova.Rd index 8ea2192d..9e3dc261 100644 --- a/man/rbmi_ancova.Rd +++ b/man/rbmi_ancova.Rd @@ -12,17 +12,17 @@ rbmi_ancova( ) } \arguments{ -\item{data}{(\code{data.frame})\cr A \code{data.frame} containing the data to be used in the model.} +\item{data}{A \code{data.frame} containing the data to be used in the model.} -\item{vars}{(\code{list})\cr A \code{vars} object as generated by \code{\link[rbmi:set_vars]{rbmi::set_vars()}}. Only the \code{group}, +\item{vars}{A \code{vars} object as generated by the set_vars() function from the rbmi package. Only the \code{group}, \code{visit}, \code{outcome} and \code{covariates} elements are required. See details.} -\item{visits}{(\verb{character vector})\cr An optional character vector specifying which visits to +\item{visits}{An optional character vector specifying which visits to fit the ancova model at. 
If \code{NULL}, a separate ancova model will be fit to the outcomes for each visit (as determined by \code{unique(data[[vars$visit]])}). See details.} -\item{weights}{(\code{character})\cr Character, either \code{"counterfactual"} (default), \code{"equal"}, +\item{weights}{Character, either \code{"counterfactual"} (default), \code{"equal"}, \code{"proportional_em"} or \code{"proportional"}. Specifies the weighting strategy to be used when calculating the lsmeans. See the weighting section for more details.} @@ -47,10 +47,10 @@ The function works as follows: \item Repeat points 2-3 for all other values in \code{visits}. } -If no value for \code{visits} is provided, then it will be set to +If no value for \code{visits} is provided then it will be set to \code{unique(data[[vars$visit]])}. -In order to meet the formatting standards set by \code{\link[=rbmi_analyse]{rbmi_analyse()}}, the results will be collapsed +In order to meet the formatting standards set by \code{\link[=rbmi_analyse]{rbmi_analyse()}} the results will be collapsed into a single list suffixed by the visit name, e.g.: \if{html}{\out{<div class="sourceCode">}}\preformatted{list( @@ -71,7 +71,7 @@ square mean results. In the above example \code{vars$group} has two factor level The new "var" refers to the model estimated variance of the residuals. If you want to include interaction terms in your model this can be done -by providing them to the \code{covariates} argument of \code{\link[rbmi:set_vars]{rbmi::set_vars()}} +by providing them to the \code{covariates} argument of the set_vars() function from the rbmi package e.g. \code{set_vars(covariates = c("sex*age"))}. 
} \note{ @@ -88,5 +88,5 @@ include: \code{\link[stats:lm]{stats::lm()}} -\code{\link[rbmi:set_vars]{rbmi::set_vars()}} +The set_vars() function from the rbmi package } diff --git a/man/rbmi_ancova_single.Rd b/man/rbmi_ancova_single.Rd index f948fa5f..72c53c2c 100644 --- a/man/rbmi_ancova_single.Rd +++ b/man/rbmi_ancova_single.Rd @@ -13,17 +13,16 @@ rbmi_ancova_single( ) } \arguments{ -\item{data}{(\code{data.frame})\cr A \code{data.frame} containing the data to be used in the model.} +\item{data}{A \code{data.frame} containing the data to be used in the model.} -\item{outcome}{(\code{string})\cr name of the outcome variable in \code{data}.} +\item{outcome}{string, the name of the outcome variable in \code{data}.} -\item{group}{(\code{string})\cr name of the group variable in \code{data}.} +\item{group}{string, the name of the group variable in \code{data}.} -\item{covariates}{(\verb{character vector})\cr character vector containing the -name of any additional covariates +\item{covariates}{character vector containing the name of any additional covariates to be included in the model as well as any interaction terms.} -\item{weights}{(\code{character})\cr Character, either \code{"counterfactual"} (default), \code{"equal"}, +\item{weights}{Character, either \code{"counterfactual"} (default), \code{"equal"}, \code{"proportional_em"} or \code{"proportional"}. Specifies the weighting strategy to be used when calculating the lsmeans. See the weighting section for more details.} @@ -43,9 +42,11 @@ Performance analysis of covariance. 
See \code{\link[=rbmi_ancova]{rbmi_ancova()} } \examples{ -iris2 <- iris[iris$Species \%in\% c("versicolor", "virginica"), ] -iris2$Species <- factor(iris2$Species) -rbmi_ancova_single(iris2, "Sepal.Length", "Species", c("Petal.Length * Petal.Width")) +if (requireNamespace("rbmi", quietly = TRUE)) { + iris2 <- iris[iris$Species \%in\% c("versicolor", "virginica"), ] + iris2$Species <- factor(iris2$Species) + rbmi_ancova_single(iris2, "Sepal.Length", "Species", c("Petal.Length * Petal.Width")) +} } \seealso{ diff --git a/man/rbmi_mmrm.Rd b/man/rbmi_mmrm.Rd index bf76294c..1656ab76 100644 --- a/man/rbmi_mmrm.Rd +++ b/man/rbmi_mmrm.Rd @@ -16,7 +16,8 @@ rbmi_mmrm( \arguments{ \item{data}{(\code{data.frame})\cr containing the data to be used in the model.} -\item{vars}{(\code{vars})\cr list as generated by \code{\link[rbmi:set_vars]{rbmi::set_vars()}}. Only the \code{subjid}, \code{group}, +\item{vars}{(\code{vars})\cr list as generated by the set_vars() function from the rbmi package. +Only the \code{subjid}, \code{group}, \code{visit}, \code{outcome} and \code{covariates} elements are required. See details.} \item{cov_struct}{(\code{string})\cr the covariance structure to use. Note that the same @@ -53,7 +54,8 @@ with the specified covariance structure for visits within subjects. vs the control group. } -In order to meet the formatting standards set by \code{\link[rbmi:analyse]{rbmi::analyse()}} the results will be collapsed +In order to meet the formatting standards set by the analyse() function from the rbmi package, +the results will be collapsed into a single list suffixed by the visit name, e.g.: \if{html}{\out{<div class="sourceCode">}}\preformatted{list( @@ -75,7 +77,7 @@ The new 'var' refers to the model estimated variance of the residuals at the giv visit, together with the degrees of freedom (which is treatment group specific). 
If you want to include additional interaction terms in your model this can be done -by providing them to the \code{covariates} argument of \code{\link[rbmi:set_vars]{rbmi::set_vars()}} +by providing them to the \code{covariates} argument of the set_vars() function from the rbmi package e.g. \code{set_vars(covariates = c('sex*age'))}. } \note{ @@ -88,5 +90,5 @@ you want to include it. This will make sense in most cases. \code{\link[mmrm:mmrm]{mmrm::mmrm()}} -\code{\link[rbmi:set_vars]{rbmi::set_vars()}} +The set_vars() function from the rbmi package } diff --git a/man/rbmi_pool.Rd b/man/rbmi_pool.Rd index 3d9446e4..0f1cbe0a 100644 --- a/man/rbmi_pool.Rd +++ b/man/rbmi_pool.Rd @@ -12,7 +12,7 @@ rbmi_pool( ) } \arguments{ -\item{results}{an analysis object created by \code{\link[rbmi:analyse]{rbmi::analyse()}}.} +\item{results}{an analysis object created by rbmi's \code{analyse()}.} \item{conf.level}{confidence level of the returned confidence interval. Must be a single number between 0 and 1. Default is 0.95.} @@ -23,8 +23,8 @@ must be one of \code{"two.sided"} (default), \code{"greater"} or \code{"less"}.} \item{type}{a character string of either \code{"percentile"} (default) or \code{"normal"}. Determines what method should be used to calculate the bootstrap confidence intervals. See details. -Only used if \code{rbmi::method_condmean(type = "bootstrap")} was specified -in the original call to \code{\link[rbmi:draws]{rbmi::draws()}}.} +Only used if \code{method_condmean(type = "bootstrap")} was specified +in the original call to draws().} } \value{ A list of class \code{pool}. @@ -34,5 +34,5 @@ Pool analysis results obtained from the imputed datasets } \details{ This has been forked from the \code{rbmi} package, mainly to support in -addition the pooling of variance estimates. See \code{\link[rbmi:pool]{rbmi::pool()}} for more details. +addition the pooling of variance estimates. See \code{pool()} for more details. 
} diff --git a/man/tt_to_tbldf.Rd b/man/tt_to_tbldf.Rd index 72a1b4b2..ef200676 100644 --- a/man/tt_to_tbldf.Rd +++ b/man/tt_to_tbldf.Rd @@ -8,7 +8,8 @@ tt_to_tbldf( tt, fontspec = font_spec("Times", 9L, 1), string_map = default_str_map, - markup_df = dps_markup_df + markup_df = dps_markup_df, + validate = TRUE ) } \arguments{ @@ -19,6 +20,10 @@ tt_to_tbldf( \item{string_map}{(\code{list})\cr Unicode mapping for special characters} \item{markup_df}{(\code{data.frame})\cr Data frame containing markup information} + +\item{validate}{logical(1). Whether to validate the table structure using +\code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. If \code{FALSE}, a message +will be displayed instead of stopping with an error when validation fails.} } \value{ \code{tt} represented as a \code{tbl} data.frame suitable for passing diff --git a/man/tt_to_tlgrtf.Rd b/man/tt_to_tlgrtf.Rd index aa0e9124..e60fa8dd 100644 --- a/man/tt_to_tlgrtf.Rd +++ b/man/tt_to_tlgrtf.Rd @@ -90,8 +90,8 @@ sas performs nearest-value rounding consistent with rounding within SAS. See \verb{[formatters::format_value()]} for more details.} \item{validate}{logical(1). Whether to validate the table structure using -\code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. This can also be disabled -globally by setting the environment variable \code{JUNCO_DISABLE_VALIDATION=TRUE}.} +\code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. 
If \code{FALSE}, a message +will be displayed when validation fails.} \item{...}{Additional arguments passed to gentlg} } diff --git a/tests/testthat/_snaps/a_freq_j.md b/tests/testthat/_snaps/a_freq_j.md index f0eaa087..250992b7 100644 --- a/tests/testthat/_snaps/a_freq_j.md +++ b/tests/testthat/_snaps/a_freq_j.md @@ -33,3 +33,25 @@ Response A 6/50 (12.0%) 4/50 (8.0%) Response B 6/50 (12.0%) 4/50 (8.0%) +# a_freq_j in specific situation error for not passing alt_counts_df + + Code + result + Output + Active Study Agent Risk Difference (%) (95% CI) + A: Drug X C: Combination B: Placebo A: Drug X vs B: Placebo C: Combination vs B: Placebo + (N=134) (N=132) (N=134) (N=134) (N=132) + —————————————————————————————————————————————————————————————————————————————————————————————————————————————————— + A + COMPLETED 22 (16.4%) 22 (16.7%) 23 (17.2%) -0.7 (-9.7, 8.2) -0.5 (-9.5, 8.5) + DISCONTINUED 11 (8.2%) 13 (9.8%) 13 (9.7%) -1.5 (-8.3, 5.3) 0.1 (-7.0, 7.3) + ONGOING 5 (3.7%) 5 (3.8%) 8 (6.0%) -2.2 (-7.4, 2.9) -2.2 (-7.3, 3.0) + B + COMPLETED 23 (17.2%) 23 (17.4%) 19 (14.2%) 3.0 (-5.7, 11.7) 3.2 (-5.5, 12.0) + DISCONTINUED 12 (9.0%) 14 (10.6%) 18 (13.4%) -4.5 (-12.0, 3.1) -2.8 (-10.6, 5.0) + ONGOING 12 (9.0%) 6 (4.5%) 8 (6.0%) 3.0 (-3.3, 9.3) -1.4 (-6.8, 3.9) + C + COMPLETED 24 (17.9%) 27 (20.5%) 27 (20.1%) -2.2 (-11.6, 7.2) 0.3 (-9.4, 10.0) + DISCONTINUED 15 (11.2%) 12 (9.1%) 12 (9.0%) 2.2 (-5.0, 9.4) 0.1 (-6.8, 7.0) + ONGOING 10 (7.5%) 10 (7.6%) 6 (4.5%) 3.0 (-2.7, 8.6) 3.1 (-2.6, 8.8) + diff --git a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md index 965c495e..27c0a0b0 100644 --- a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md +++ b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md @@ -9,56 +9,16 @@ # get_formats_from_stats works as expected Code - res + normalize_fun(res$quantiles_upper) Output - $quantiles_upper - function(x, - output, - 
round_type = valid_round_type, - na_str = na_str_dflt) { - if (anyNA(na_str) || (replace_na_dflt && any(na_str == "NA"))) { - na_inds <- which(is.na(na_str) | (replace_na_dflt & na_str == "NA")) - na_str[na_inds] <- rep(na_str_dflt, length.out = length(na_str))[na_inds] - } - if (length(x) == 0 || isTRUE(all(x == ""))) { - return(NULL) - } else if (!length(positions[[1]]) == length(x)) { - stop( - "Error: input str in call to jjcsformat_xx must contain same number of xx as the number of stats." - ) - } - - round_type <- match.arg(round_type) - - values <- Map(y = x, fun = roundings, na_str = na_str, function(y, fun, na_str, output) { - fun(y, na_str = na_str, round_type = round_type) - }) - - regmatches(x = str, m = positions)[[1]] <- values - return(str) - } - <environment: base> - - $range_with_cens_info - function(x, output, round_type = valid_round_type, ...) { - round_type <- match.arg(round_type) - checkmate::assert_numeric( - x, - len = 4L, - finite = TRUE, - any.missing = FALSE - ) - checkmate::assert_true(all(x[c(3, 4)] %in% c(0, 1))) - - res <- vapply(x[c(1, 2)], FUN = function(x) { - format_value(x, format_xx, round_type = round_type) - }, character(1)) - if (x[3] == 1) res[1] <- paste0(res[1], "+") - if (x[4] == 1) res[2] <- paste0(res[2], "+") - paste0("(", res[1], ", ", res[2], ")") - } - <environment: base> - + [1] "{if(anyNA(na_str)||(replace_na_dflt&&any(na_str==\"NA\"))){na_inds<-which(is.na(na_str)|(replace_na_dflt&na_str==\"NA\"))na_str[na_inds]<-rep(na_str_dflt,length.out=length(na_str))[na_inds]}if(length(x)==0||isTRUE(all(x==\"\"))){return(NULL)}elseif(!length(positions[[1]])==length(x)){stop(\"Error:inputstrincalltojjcsformat_xxmustcontainsamenumberofxxasthenumberofstats.\")}round_type<-match.arg(round_type)values<-Map(y=x,fun=roundings,na_str=na_str,function(y,fun,na_str,output){fun(y,na_str=na_str,round_type=round_type)})regmatches(x=str,m=positions)[[1]]<-valuesreturn(str)}" + +--- + + Code + normalize_fun(res$range_with_cens_info) + 
Output + [1] "{round_type<-match.arg(round_type)checkmate::assert_numeric(x,len=4L,finite=TRUE,any.missing=FALSE)checkmate::assert_true(all(x[c(3,4)]%in%c(0,1)))res<-vapply(x[c(1,2)],FUN=function(x){format_value(x,format_xx,round_type=round_type)},character(1))if(x[3]==1)res[1]<-paste0(res[1],\"+\")if(x[4]==1)res[2]<-paste0(res[2],\"+\")paste0(\"(\",res[1],\",\",res[2],\")\")}" # get_labels_from_stats works as expected diff --git a/tests/testthat/helper_rbmi.R b/tests/testthat/helper_rbmi.R new file mode 100644 index 00000000..7c99cad9 --- /dev/null +++ b/tests/testthat/helper_rbmi.R @@ -0,0 +1,75 @@ +# Used in test-pool_rbmi.R ---- +rbmi_as_analysis <- function( + results, + method, + delta = NULL, + fun = NULL, + fun_name = NULL +) { + next_class <- switch(class(method)[[2]], + bayes = "rubin", + approxbayes = "rubin", + condmean = ifelse(method$type == "jackknife", "jackknife", "bootstrap"), + bmlmi = "bmlmi" + ) + assert_that( + is.list(results), + length(next_class) == 1, + is.character(next_class), + next_class %in% c("jackknife", "bootstrap", "rubin", "bmlmi") + ) + x <- list( + results = rbmi::as_class(results, c(next_class, "list")), + delta = delta, + fun = fun, + fun_name = fun_name, + method = method + ) + class(x) <- c("analysis", "list") + rbmi::validate(x) + return(x) +} + +# Used in test-rbmi.R ---- + +set_col_names <- function(x, nam) { + colnames(x) <- nam + return(x) +} + +f2n <- function(x) as.numeric(x) - 1 + +get_sim_data <- function(n, sigma, trt = 4) { + nv <- ncol(sigma) + covars <- tibble::tibble( + id = 1:n, + age = rnorm(n), + group = factor( + sample(c("A", "B"), size = n, replace = TRUE), + levels = c("A", "B") + ), + sex = factor( + sample(c("M", "F"), size = n, replace = TRUE), + levels = c("M", "F") + ) + ) + + dat <- mvtnorm::rmvnorm(n, sigma = sigma) |> + set_col_names(paste0("visit_", 1:nv)) |> + dplyr::as_tibble() |> + dplyr::mutate(id = seq_len(dplyr::n())) |> + tidyr::gather("visit", "outcome", -id) |> + dplyr::mutate(visit = 
factor(.data$visit)) |> + dplyr::arrange(id, .data$visit) |> + dplyr::left_join(covars, by = "id") |> + dplyr::mutate( + outcome = .data$outcome + + 5 + + 3 * .data$age + + 3 * f2n(.data$sex) + + trt * f2n(.data$group) + ) |> + dplyr::mutate(id = as.factor(id)) + + return(dat) +} diff --git a/tests/testthat/test-a_freq_j.R b/tests/testthat/test-a_freq_j.R index bbd764cd..6493f191 100644 --- a/tests/testthat/test-a_freq_j.R +++ b/tests/testthat/test-a_freq_j.R @@ -80,3 +80,47 @@ test_that("a_freq_j_with_exclude allows to exclude row split levels from the ana safe_prune_table(prune_func = tern::keep_rows(keep_non_null_rows)) expect_snapshot(result) }) + +test_that("a_freq_j in specific situation error for not passing alt_counts_df", { + library(dplyr) + trtvar <- "ARM" + ctrl_grp <- "B: Placebo" + + adsl <- ex_adsl |> select(c("USUBJID", "STRATA1", "EOSSTT", all_of(trtvar))) + adsl$colspan_trt <- factor( + ifelse(adsl[[trtvar]] == ctrl_grp, " ", "Active Study Agent"), + levels = c("Active Study Agent", " ") + ) + adsl$rrisk_header <- "Risk Difference (%) (95% CI)" + adsl$rrisk_label <- paste(adsl[[trtvar]], paste("vs", ctrl_grp)) + + colspan_trt_map <- create_colspan_map( + df = adsl, + non_active_grp = ctrl_grp, + non_active_grp_span_lbl = " ", + active_grp_span_lbl = "Active Study Agent", + colspan_var = "colspan_trt", + trt_var = trtvar + ) + + a_freq_j_args <- list( + .stats = "count_unique_fraction", + ref_path = c("colspan_trt", " ", trtvar, ctrl_grp) + ) + + lyt <- basic_table(show_colcounts = TRUE) |> + split_cols_by("colspan_trt", split_fun = trim_levels_to_map(map = colspan_trt_map)) |> + split_cols_by(trtvar) |> + split_cols_by("rrisk_header", nested = FALSE) |> + split_cols_by(trtvar, labels_var = "rrisk_label", split_fun = remove_split_levels(ctrl_grp)) |> + split_rows_by("STRATA1") |> + analyze("EOSSTT", afun = a_freq_j, extra_args = a_freq_j_args) + + expect_error( + build_table(lyt, adsl), + "In order to get correct numbers in relative risk column" + 
) + + result <- build_table(lyt, adsl, alt_counts_df = adsl) + expect_snapshot(result) +}) diff --git a/tests/testthat/test-a_freq_resp_var_j.R b/tests/testthat/test-a_freq_resp_var_j.R index c3fa105f..5c7e6754 100644 --- a/tests/testthat/test-a_freq_resp_var_j.R +++ b/tests/testthat/test-a_freq_resp_var_j.R @@ -16,8 +16,8 @@ test_that("a_freq_resp_var_j works as expected with basic usage", { ) # Create the layout based on patterns seen in test-varia.R - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( "SEX", afun = a_freq_resp_var_j, @@ -54,8 +54,8 @@ test_that("a_freq_resp_var_j works with factor responses", { ) # Create the layout - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( "SEX", afun = a_freq_resp_var_j, @@ -86,8 +86,8 @@ test_that("a_freq_resp_var_j handles missing values correctly", { adrs$SEX[6:10] <- NA # Create the layout - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( "SEX", afun = a_freq_resp_var_j, @@ -117,8 +117,8 @@ test_that("a_freq_resp_var_j errors on invalid responses", { adrs$RSP[1:3] <- "MAYBE" # Create the layout - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( "SEX", afun = a_freq_resp_var_j, @@ -148,8 +148,8 @@ test_that("a_freq_resp_var_j errors when resp_var is null", { ) # Create layout with missing resp_var - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, 
round_type = "sas") |> + split_cols_by("ARM") |> analyze( "SEX", afun = a_freq_resp_var_j @@ -178,8 +178,8 @@ test_that("a_freq_resp_var_j works with drop_levels parameter", { ) # Create layout with drop_levels = TRUE - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( "SEX", afun = a_freq_resp_var_j, @@ -210,8 +210,8 @@ test_that("a_freq_resp_var_j works with riskdiff parameter", { ) # Create layout with drop_levels = TRUE - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( "SEX", afun = a_freq_resp_var_j, diff --git a/tests/testthat/test-a_maxlev.R b/tests/testthat/test-a_maxlev.R index ab9ee8ab..e1e53f8d 100644 --- a/tests/testthat/test-a_maxlev.R +++ b/tests/testthat/test-a_maxlev.R @@ -4,7 +4,7 @@ aesevall_spf <- make_combo_splitfun( levels = NULL, ) -#### Actual start of tests +# Start of tests ---- test_that("a_maxlev produces correct numbers for single treatment per subject", { my_adsl <- ex_adsl[, c("USUBJID", "ARM", "ACTARM")] @@ -22,11 +22,11 @@ test_that("a_maxlev produces correct numbers for single treatment per subject", split_rows_by("AESEV", split_fun = aesevall_spf) |> summarize_row_groups( "AESEV", - cfun = a_maxlev, extra_args = list(denom_df = my_adsl, any_level = TRUE) + cfun = a_maxlev, extra_args = list(any_level = TRUE) ) |> - analyze("AESEV", afun = a_maxlev, extra_args = list(denom_df = my_adsl)) + analyze("AESEV", afun = a_maxlev) - res <- expect_silent(build_table(lyt, my_adae)) + res <- expect_silent(build_table(lyt, my_adae, alt_counts_df = my_adsl)) res_act <- matrix_form(res)$string res_exp <- structure( c( @@ -62,15 +62,15 @@ test_that("a_maxlev produces correct numbers for sequence of treatments (missing summarize_row_groups( "AESEV", cfun = 
a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl, any_level = TRUE) + extra_args = list(id = "ID", any_level = TRUE) ) |> analyze( "AESEV", afun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl) + extra_args = list(id = "ID") ) - res <- expect_silent(build_table(lyt, my_adae)) + res <- expect_silent(build_table(lyt, my_adae, alt_counts_df = my_adsl)) res_act <- matrix_form(res)$string res_exp <- structure( c( @@ -106,15 +106,15 @@ test_that("a_maxlev produces correct numbers for sequence of treatments (all val summarize_row_groups( "AESEV", cfun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl, any_level = TRUE) + extra_args = list(id = "ID", any_level = TRUE) ) |> analyze( "AESEV", afun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl) + extra_args = list(id = "ID") ) - res <- expect_silent(build_table(lyt, my_adae)) + res <- expect_silent(build_table(lyt, my_adae, alt_counts_df = my_adsl)) res_act <- matrix_form(res)$string res_exp <- structure( c( @@ -150,10 +150,10 @@ test_that("a_maxlev produces correct numbers when any_level is active for custom summarize_row_groups( "AESEV", cfun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl, any_level = TRUE, any_level_exclude = "Mild") + extra_args = list(id = "ID", any_level = TRUE, any_level_exclude = "Mild") ) - res <- expect_silent(build_table(lyt, my_adae)) + res <- expect_silent(build_table(lyt, my_adae, alt_counts_df = my_adsl)) res_act <- matrix_form(res)$string res_exp <- structure( c("", "Any AE", "a", "0", "b", "1 (33.3%)", "c", "1 (33.3%)", "Total", "2 (66.7%)"), @@ -183,16 +183,16 @@ test_that("a_maxlev throws an error for not ordered variable", { summarize_row_groups( "AESEV", cfun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl, any_level = TRUE) + extra_args = list(id = "ID", any_level = TRUE) ) |> analyze( "AESEV", afun = a_maxlev, - extra_args = list(id = "ID", denom_df = my_adsl) + extra_args = list(id = "ID") ) res <- 
expect_error( - build_table(lyt, my_adae), + build_table(lyt, my_adae, alt_counts_df = my_adsl), regexp = "*.var.*.[Mm]ust be an ordered factor*." ) }) diff --git a/tests/testthat/test-a_summarize_aval_chg_diff.R b/tests/testthat/test-a_summarize_aval_chg_diff.R index 062685d3..d1431547 100644 --- a/tests/testthat/test-a_summarize_aval_chg_diff.R +++ b/tests/testthat/test-a_summarize_aval_chg_diff.R @@ -57,7 +57,7 @@ test_that("a_summarize_aval_chg_diff_j works as expected", { CHG = c(2, 3, -1, 9, -2, 0, 6, -2, 5, 2) ) - ADEG <- ADEG %>% + ADEG <- ADEG |> mutate( TRT01A = as.factor(TRT01A), STUDYID = as.factor(STUDYID) @@ -94,40 +94,40 @@ test_that("a_summarize_aval_chg_diff_j works as expected", { ref_path <- c("colspan_trt", " ", "TRT01A", "Placebo") - lyt <- basic_table(round_type = "sas") %>% + lyt <- basic_table(round_type = "sas") |> ### first columns split_cols_by( "colspan_trt", split_fun = trim_levels_to_map(map = colspan_trt_map) - ) %>% - split_cols_by("TRT01A") %>% + ) |> + split_cols_by("TRT01A") |> split_rows_by( "PARAM", label_pos = "topleft", split_label = "Blood Pressure", section_div = " ", split_fun = drop_split_levels - ) %>% + ) |> split_rows_by( "AVISIT", label_pos = "topleft", split_label = "Study Visit", split_fun = drop_split_levels, child_labels = "hidden" - ) %>% + ) |> ## set up a 3 column split split_cols_by_multivar( c("AVAL", "AVAL", "CHG"), varlabels = c("n/N (%)", "Mean (CI)", "CFB (CI)") - ) %>% - split_cols_by("rrisk_header", nested = FALSE) %>% + ) |> + split_cols_by("rrisk_header", nested = FALSE) |> split_cols_by( "TRT01A", split_fun = remove_split_levels("Placebo"), labels_var = "rrisk_label" - ) %>% + ) |> ### difference columns : just 1 column & analysis needs to be done on change - split_cols_by_multivar(c("CHG"), varlabels = c(" ")) %>% + split_cols_by_multivar(c("CHG"), varlabels = c(" ")) |> ### the variable passed here in analyze is not used (STUDYID), it is a dummy var passing, ### the function 
a_summarize_aval_chg_diff_j grabs the required vars from cols_by_multivar calls analyze( @@ -207,7 +207,7 @@ test_that("a_summarize_aval_chg_diff_j works with ancova = TRUE", { CHG = c(2, 3, -1, 9, -2, 0, 6, -2, 5, 2) ) - ADEG <- ADEG %>% + ADEG <- ADEG |> mutate( TRT01A = as.factor(TRT01A), STUDYID = as.factor(STUDYID) @@ -230,40 +230,40 @@ test_that("a_summarize_aval_chg_diff_j works with ancova = TRUE", { ) ref_path <- c("colspan_trt", " ", "TRT01A", "Placebo") - lyt <- basic_table(round_type = "sas") %>% + lyt <- basic_table(round_type = "sas") |> ### first columns split_cols_by( "colspan_trt", split_fun = trim_levels_to_map(map = colspan_trt_map) - ) %>% - split_cols_by("TRT01A") %>% + ) |> + split_cols_by("TRT01A") |> split_rows_by( "PARAM", label_pos = "topleft", split_label = "Blood Pressure", section_div = " ", split_fun = drop_split_levels - ) %>% + ) |> split_rows_by( "AVISIT", label_pos = "topleft", split_label = "Study Visit", split_fun = drop_split_levels, child_labels = "hidden" - ) %>% + ) |> ## set up a 3 column split split_cols_by_multivar( c("AVAL", "AVAL", "CHG"), varlabels = c("n/N (%)", "Mean (CI)", "CFB (CI)") - ) %>% - split_cols_by("rrisk_header", nested = FALSE) %>% + ) |> + split_cols_by("rrisk_header", nested = FALSE) |> split_cols_by( "TRT01A", split_fun = remove_split_levels("Placebo"), labels_var = "rrisk_label" - ) %>% + ) |> ### difference columns : just 1 column & analysis needs to be done on change - split_cols_by_multivar(c("CHG"), varlabels = c(" ")) %>% + split_cols_by_multivar(c("CHG"), varlabels = c(" ")) |> ### the variable passed here in analyze is not used (STUDYID), it is a dummy var passing, ### the function a_summarize_aval_chg_diff_j grabs the required vars from cols_by_multivar calls analyze( diff --git a/tests/testthat/test-a_two_tier.R b/tests/testthat/test-a_two_tier.R new file mode 100644 index 00000000..7572b2e4 --- /dev/null +++ b/tests/testthat/test-a_two_tier.R @@ -0,0 +1,232 @@ +n <- 36 +set.seed(1) +data <- 
data.frame( + trt = factor(sample(c("A", "B", "C"), n, replace = TRUE)), + eostt = factor(sample(c("COMPLETED", "DISCONTINUED", "ONGOING"), n, replace = TRUE)), + dcsreas = factor(sample(c("ADVERSE EVENT", "LACK OF EFFICACY", "PHYSICIAN DECISION"), n, replace = TRUE)) +) + +# Start of tests ---- + +test_that("a_two_tier works silently for ex_adsl data with simple_analysis", { + lyt <- basic_table() |> + split_cols_by("ARM") |> + split_rows_by("EOSSTT", child_labels = "hidden") |> + analyze("EOSSTT", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "DCSREAS", + drill_down_levs = "DISCONTINUED" + ) + ) + + expect_silent(build_table(lyt, ex_adsl)) +}) + +test_that("a_two_tier works in a table layout as expected", { + lyt <- basic_table() |> + split_cols_by("trt") |> + split_rows_by("eostt", child_labels = "hidden") |> + analyze("eostt", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "dcsreas", + drill_down_levs = "DISCONTINUED" + ) + ) + res <- expect_silent(build_table(lyt, data)) + res_act <- matrix_form(res)$string + res_exp <- structure( + c( + "", "COMPLETED", "DISCONTINUED", "ADVERSE EVENT", "LACK OF EFFICACY", "PHYSICIAN DECISION", "ONGOING", + "A", "3", "9", "3", "4", "2", "3", + "B", "2", "5", "2", "0", "3", "5", + "C", "3", "3", "0", "2", "1", "3" + ), + dim = c(7, 4) + ) + expect_identical(res_act, res_exp) +}) + +test_that("a_two_tier produces the expected table layout when a level has no observations", { + lyt <- basic_table() |> + split_cols_by("trt") |> + split_rows_by("eostt", child_labels = "hidden") |> + analyze("eostt", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "dcsreas", + drill_down_levs = "DISCONTINUED" + ) + ) + + data_subset <- subset(data, dcsreas != "ADVERSE EVENT") + + res <- expect_silent(build_table(lyt, data_subset)) + res_act <- 
matrix_form(res)$string + res_exp <- structure( + c( + "", "COMPLETED", "DISCONTINUED", "LACK OF EFFICACY", "PHYSICIAN DECISION", "ONGOING", + "A", "3", "6", "4", "2", "2", + "B", "1", "3", "0", "3", "4", + "C", "1", "3", "2", "1", "2" + ), + dim = c(6, 4) + ) + expect_identical(res_act, res_exp) +}) + +test_that("a_two_tier produces the expected table layout when a level has no observations (use_all_levels)", { + lyt <- basic_table() |> + split_cols_by("trt") |> + split_rows_by("eostt", child_labels = "hidden") |> + analyze("eostt", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "dcsreas", + drill_down_levs = "DISCONTINUED", + use_all_levels = TRUE + ) + ) + + data_subset <- subset(data, dcsreas != "ADVERSE EVENT") + + res <- expect_silent(build_table(lyt, data_subset)) + res_act <- matrix_form(res)$string + res_exp <- structure( + c( + "", "COMPLETED", "DISCONTINUED", "ADVERSE EVENT", "LACK OF EFFICACY", "PHYSICIAN DECISION", "ONGOING", + "A", "3", "6", "0", "4", "2", "2", + "B", "1", "3", "0", "0", "3", "4", + "C", "1", "3", "0", "2", "1", "2" + ), + dim = c(7, 4) + ) + expect_identical(res_act, res_exp) +}) + +test_that("a_two_tier produces the expected table layout when there are no observations for any level", { + lyt <- basic_table() |> + split_cols_by("trt") |> + split_rows_by("eostt", child_labels = "hidden") |> + analyze("eostt", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "dcsreas", + drill_down_levs = "DISCONTINUED" + ) + ) + + data_subset <- subset(data, eostt != "DISCONTINUED") + + res <- expect_silent(build_table(lyt, data_subset)) + res_act <- matrix_form(res)$string + res_exp <- structure( + c( + "", "COMPLETED", "DISCONTINUED", "ONGOING", + "A", "3", "0", "3", + "B", "2", "0", "5", + "C", "3", "0", "3" + ), + dim = c(4, 4) + ) + expect_identical(res_act, res_exp) +}) + +test_that("a_two_tier produces the 
expected table layout - no observations at any level (use_all_levels)", { + lyt <- basic_table() |> + split_cols_by("trt") |> + split_rows_by("eostt", child_labels = "hidden") |> + analyze("eostt", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "dcsreas", + drill_down_levs = "DISCONTINUED", + use_all_levels = TRUE + ) + ) + + data_subset <- subset(data, eostt != "DISCONTINUED") + + res <- expect_silent(build_table(lyt, data_subset)) + res_act <- matrix_form(res)$string + res_exp <- structure( + c( + "", "COMPLETED", "DISCONTINUED", "ONGOING", + "A", "3", "0", "3", + "B", "2", "0", "5", + "C", "3", "0", "3" + ), + dim = c(4, 4) + ) + expect_identical(res_act, res_exp) +}) + +test_that("a_two_tier produces the expected table layout when there is no data at all - only levels", { + lyt <- basic_table() |> + split_cols_by("trt") |> + split_rows_by("eostt", child_labels = "hidden") |> + analyze("eostt", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "dcsreas", + drill_down_levs = "DISCONTINUED" + ) + ) + + res <- expect_silent(build_table(lyt, data[0, ])) + res_act <- matrix_form(res)$string + res_exp <- structure( + c( + "", "COMPLETED", "DISCONTINUED", "ONGOING", + "A", "0", "0", "0", + "B", "0", "0", "0", + "C", "0", "0", "0" + ), + dim = c(4, 4) + ) + expect_identical(res_act, res_exp) +}) + +test_that("a_two_tier produces the expected table layout when there is no data at all - only levels (use_all_levels)", { + lyt <- basic_table() |> + split_cols_by("trt") |> + split_rows_by("eostt", child_labels = "hidden") |> + analyze("eostt", + afun = a_two_tier, + extra_args = list( + grp_fun = simple_analysis, + detail_fun = simple_analysis, + inner_var = "dcsreas", + drill_down_levs = "DISCONTINUED", + use_all_levels = TRUE + ) + ) + + res <- expect_silent(build_table(lyt, data[0, ])) + res_act <- matrix_form(res)$string + res_exp <- 
structure( + c( + "", "COMPLETED", "DISCONTINUED", "ONGOING", + "A", "0", "0", "0", + "B", "0", "0", "0", + "C", "0", "0", "0" + ), + dim = c(4, 4) + ) + expect_identical(res_act, res_exp) +}) diff --git a/tests/testthat/test-analyze_values.R b/tests/testthat/test-analyze_values.R index 1027158e..57012b63 100644 --- a/tests/testthat/test-analyze_values.R +++ b/tests/testthat/test-analyze_values.R @@ -2,7 +2,7 @@ library(rtables) test_that("analyze_values correctly modifies layout with proper formats", { # Create a simple layout - lyt <- basic_table(round_type = "sas") %>% + lyt <- basic_table(round_type = "sas") |> split_cols_by("ARM") # Define variables to analyze diff --git a/tests/testthat/test-ancova_rbmi.R b/tests/testthat/test-ancova_rbmi.R index 8f38866d..5e92228c 100644 --- a/tests/testthat/test-ancova_rbmi.R +++ b/tests/testthat/test-ancova_rbmi.R @@ -1,6 +1,12 @@ library(tibble) library(dplyr) +if (requireNamespace("rbmi", quietly = TRUE)) { + suppressPackageStartupMessages(library(rbmi)) +} else { + skip("rbmi package not available") +} + f2n <- function(x) as.numeric(x) - 1 test_that("rbmi_ancova_single works also with multiple treatment arms", { diff --git a/tests/testthat/test-cmp_functions.R b/tests/testthat/test-cmp_functions.R index 6f5e6a5a..e7fdd9ec 100644 --- a/tests/testthat/test-cmp_functions.R +++ b/tests/testthat/test-cmp_functions.R @@ -1,5 +1,5 @@ test_that("cmp_split_fun works as expected", { - result <- basic_table(round_type = "sas") %>% + result <- basic_table(round_type = "sas") |> split_cols_by("ID", split_fun = cmp_split_fun) |> build_table(formatters::DM) expect_snapshot(result) diff --git a/tests/testthat/test-colwidths.R b/tests/testthat/test-colwidths.R index 2a922cf3..e08f9354 100644 --- a/tests/testthat/test-colwidths.R +++ b/tests/testthat/test-colwidths.R @@ -40,7 +40,7 @@ lyt <- basic_table(round_type = "sas") |> tt <- build_table(lyt, ADSL) test_that("ttype_wrap_vec works as expected", { - result <- ttype_wrap_vec(vec = 
c(1, 2, 3, 4) %>% as.character(), fontspec = fontspec, width = 2) + result <- ttype_wrap_vec(vec = c(1, 2, 3, 4) |> as.character(), fontspec = fontspec, width = 2) # TODO: how do I guess expected_result expected_result <- list( c("1"), @@ -70,7 +70,7 @@ test_that("def_colwidths works as expected", { ARM = "Description\nOf\nPlanned Arm" ) # nolint start - suppressMessages(tt2 <- as_listing(anl, key_cols = c("USUBJID")) %>% + suppressMessages(tt2 <- as_listing(anl, key_cols = c("USUBJID")) |> add_listing_col("ARM")) # nolint end @@ -88,7 +88,7 @@ test_that("listing_column_widths works as expected", { ARM = "Description\nOf\nPlanned Arm" ) # nolint start - suppressMessages(tt3 <- as_listing(anl, key_cols = c("USUBJID")) %>% + suppressMessages(tt3 <- as_listing(anl, key_cols = c("USUBJID")) |> add_listing_col("ARM")) # nolint end mpf <- rlistings::matrix_form(tt3) @@ -106,7 +106,7 @@ test_that("find_free_colspc works as expected", { ARM = "Description\nOf\nPlanned Arm" ) # nolint start - suppressMessages(tt4 <- as_listing(anl, key_cols = c("USUBJID")) %>% + suppressMessages(tt4 <- as_listing(anl, key_cols = c("USUBJID")) |> add_listing_col("ARM")) # nolint end mpf <- rlistings::matrix_form(tt4) diff --git a/tests/testthat/test-count_pct.R b/tests/testthat/test-count_pct.R index 3d684af1..975e20d3 100644 --- a/tests/testthat/test-count_pct.R +++ b/tests/testthat/test-count_pct.R @@ -12,8 +12,8 @@ test_that("a_freq_j with val = NA and denom option", { extra_args_1 <- list( .stats = c("count_unique_denom_fraction") ) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -33,8 +33,8 @@ test_that("a_freq_j with val = NA and denom option", { denom = "N_col", .stats = c("count_unique_denom_fraction") ) - lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") 
%>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -51,8 +51,8 @@ test_that("a_freq_j with specific val (CHN) and denom option", { .stats = c("count_unique_denom_fraction"), val = "CHN" ) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -73,8 +73,8 @@ test_that("a_freq_j with specific val (CHN) and denom option", { .stats = c("count_unique_denom_fraction"), val = "CHN" ) - lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -91,8 +91,8 @@ test_that("a_freq_j with N_only", { extra_args_1 <- list( .stats = c("count_unique") ) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -115,9 +115,9 @@ test_that("a_freq_j with TotCol_only", { .stats = c("count_unique"), restr_columns = "Total" ) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% - add_overall_col("Total") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> + add_overall_col("Total") |> analyze(vars = "COUNTRY", afun = a_freq_j, extra_args = extra_args_1) # apply to adsl @@ -135,21 +135,21 @@ test_that("a_freq_j as cfun", { Ncol <- length(unique(adsl_col[["USUBJID"]])) adae_col <- adae[adae$ARM == "A: Drug X", ] - adae_col_bs <- unique(adae_col %>% select(USUBJID, AEBODSYS)) + adae_col_bs <- unique(adae_col |> select(USUBJID, AEBODSYS)) # scenario 1 : extra_args_1 <- list( denom = 
"N_col", .stats = c("count_unique_denom_fraction") ) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% - split_rows_by("AEBODSYS") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> + split_rows_by("AEBODSYS") |> summarize_row_groups( "AEBODSYS", cfun = a_freq_j, extra_args = extra_args_1 - ) %>% + ) |> analyze( vars = "AEDECOD", afun = a_freq_j, @@ -163,14 +163,14 @@ test_that("a_freq_j as cfun", { # scenario 2 : label using label_fstr method works extra_args_2 <- append(extra_args_1, list(label_fstr = "Bodysystem %s")) - lyt2 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% - split_rows_by("AEBODSYS") %>% + lyt2 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> + split_rows_by("AEBODSYS") |> summarize_row_groups( "AEBODSYS", cfun = a_freq_j, extra_args = extra_args_2 - ) %>% + ) |> analyze( vars = "AEDECOD", afun = a_freq_j, @@ -187,7 +187,7 @@ test_that("a_freq_j with label map", { Ncol <- length(unique(adsl_col[["USUBJID"]])) adae_col <- adae[adae$ARM == "A: Drug X", ] - adae_col_sub <- unique(adae_col %>% select(USUBJID)) + adae_col_sub <- unique(adae_col |> select(USUBJID)) Subjs_with_AEs <- tibble::tribble( ~value, @@ -198,8 +198,8 @@ test_that("a_freq_j with label map", { # scenario 1 : with label_map extra_args_1 <- list(label_map = Subjs_with_AEs) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "TRTEMFL", afun = a_freq_j, @@ -212,8 +212,8 @@ test_that("a_freq_j with label map", { # scenario 2 : set row label using label parameter extra_args_2 <- list(label = "Subjects with >= 1 AE") - lyt2 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt2 <- basic_table(show_colcounts = TRUE, round_type = 
"sas") |> + split_cols_by("ARM") |> analyze( vars = "TRTEMFL", afun = a_freq_j, @@ -230,8 +230,8 @@ test_that("a_freq_j (old count_pats case)", { extra_args_1 <- list( .stats = c("count_unique_denom_fraction") ) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -251,8 +251,8 @@ test_that("a_freq_j (old count_pats case)", { denom = "N_col", .stats = c("count_unique_denom_fraction") ) - lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -268,9 +268,9 @@ test_that("a_freq_j with N_subgroup as denom", { # scenario 1: denom = N_subgroup, all values extra_args_1 <- list(denom = "n_df") - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% - split_rows_by("SEX") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> + split_rows_by("SEX") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -294,9 +294,9 @@ test_that("a_freq_j with N_subgroup as denom", { .stats = c("count_unique_denom_fraction") ) - lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% - split_rows_by("SEX") %>% + lyt1c <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> + split_rows_by("SEX") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -316,14 +316,14 @@ test_that("a_freq_j with N_trt as denom - special situation", { ### spanning header for severity - adsl_ <- ex_adsl %>% select(USUBJID, ARM) - adae_ <- ex_adae %>% select(USUBJID, ARM, AEBODSYS, AEDECOD, AESEV) + adsl_ <- ex_adsl |> select(USUBJID, ARM) + adae_ <- ex_adae |> select(USUBJID, ARM, AEBODSYS, AEDECOD, AESEV) 
adae_$TRTEMFL <- "Y" trtvar <- "ARM" ## Total column on adsl - adsl_ <- adsl_ %>% + adsl_ <- adsl_ |> mutate(ASEV = factor("Total", levels = c("Total", levels(adae$AESEV)))) adsl_$spanheader <- factor( ifelse(adsl_$ASEV == "Total", " ", "Severity"), @@ -331,34 +331,34 @@ test_that("a_freq_j with N_trt as denom - special situation", { ) ## ae : max severity per subject per SOC - adaetot <- adae_ %>% + adaetot <- adae_ |> mutate( AESEV = "Total", AEBODSYSx = AEBODSYS - ) %>% - arrange(USUBJID, AEBODSYS, AEDECOD) %>% - group_by(USUBJID, AEBODSYS, AEDECOD) %>% - slice(1) %>% + ) |> + arrange(USUBJID, AEBODSYS, AEDECOD) |> + group_by(USUBJID, AEBODSYS, AEDECOD) |> + slice(1) |> ungroup() # Take maximum severity - per PT - adaemaxpt <- adae_ %>% - filter(toupper(AESEV) %in% toupper(c("Mild", "Moderate", "Severe"))) %>% + adaemaxpt <- adae_ |> + filter(toupper(AESEV) %in% toupper(c("Mild", "Moderate", "Severe"))) |> mutate( AESEVN = case_when( toupper(AESEV) == "MILD" ~ 3, toupper(AESEV) == "MODERATE" ~ 2, toupper(AESEV) == "SEVERE" ~ 1 ) - ) %>% - arrange(USUBJID, AEBODSYS, AEDECOD, AESEVN) %>% - group_by(USUBJID, AEBODSYS, AEDECOD) %>% - slice(1) %>% + ) |> + arrange(USUBJID, AEBODSYS, AEDECOD, AESEVN) |> + group_by(USUBJID, AEBODSYS, AEDECOD) |> + slice(1) |> ungroup() # Take maximum severity - per SOC - adaemaxsoc <- adae_ %>% - filter(toupper(AESEV) %in% toupper(c("Mild", "Moderate", "Severe"))) %>% + adaemaxsoc <- adae_ |> + filter(toupper(AESEV) %in% toupper(c("Mild", "Moderate", "Severe"))) |> mutate( AESEVN = case_when( toupper(AESEV) == "MILD" ~ 3, @@ -366,11 +366,11 @@ test_that("a_freq_j with N_trt as denom - special situation", { toupper(AESEV) == "SEVERE" ~ 1 ), AEBODSYSx = AEBODSYS - ) %>% - arrange(USUBJID, AEBODSYS, AESEVN) %>% - group_by(USUBJID, AEBODSYS) %>% - slice(1) %>% - ungroup() %>% + ) |> + arrange(USUBJID, AEBODSYS, AESEVN) |> + group_by(USUBJID, AEBODSYS) |> + slice(1) |> + ungroup() |> select(USUBJID, AEBODSYS, AESEV, AEBODSYSx) # Merge back 
in an create a new SOC variable that is only populated @@ -382,28 +382,28 @@ test_that("a_freq_j with N_trt as denom - special situation", { ) # Add total - adaetot <- adae_ %>% + adaetot <- adae_ |> mutate( AESEV = "Total", AEBODSYSx = AEBODSYS - ) %>% - arrange(USUBJID, AEBODSYS, AEDECOD) %>% - group_by(USUBJID, AEBODSYS, AEDECOD) %>% - slice(1) %>% + ) |> + arrange(USUBJID, AEBODSYS, AEDECOD) |> + group_by(USUBJID, AEBODSYS, AEDECOD) |> + slice(1) |> ungroup() # Set data together - adaeall <- bind_rows(adaemax, adaetot) %>% + adaeall <- bind_rows(adaemax, adaetot) |> mutate( ASEV = factor( as.character(AESEV), levels = c("Total", levels(ex_adae$AESEV)) ) - ) %>% + ) |> select(USUBJID, TRTEMFL, ASEV, AEBODSYS, AEBODSYSx, AEDECOD) - adaeall <- adaeall %>% - inner_join(., adsl_ %>% select(USUBJID, ARM), by = c("USUBJID")) + adaeall <- adaeall |> + inner_join(adsl_ |> select(USUBJID, ARM), by = c("USUBJID"), relationship = "many-to-many") adaeall$spanheader <- factor( ifelse(adaeall$ASEV == "Total", " ", "Severity"), @@ -417,17 +417,17 @@ test_that("a_freq_j with N_trt as denom - special situation", { colgroup = "ARM", riskdiff = FALSE ) - lyt <- basic_table(round_type = "sas") %>% - split_cols_by(trtvar, show_colcounts = TRUE) %>% - split_cols_by("spanheader", split_fun = trim_levels_in_group("ASEV")) %>% - split_cols_by("ASEV", show_colcounts = TRUE) %>% + lyt <- basic_table(round_type = "sas") |> + split_cols_by(trtvar, show_colcounts = TRUE) |> + split_cols_by("spanheader", split_fun = trim_levels_in_group("ASEV")) |> + split_cols_by("ASEV", show_colcounts = TRUE) |> split_rows_by( "AEBODSYS", split_label = "System Organ Class", split_fun = trim_levels_in_group("AEDECOD"), label_pos = "topleft", section_div = c(" ") - ) %>% + ) |> summarize_row_groups( "AEBODSYSx", cfun = a_freq_j, @@ -456,8 +456,8 @@ test_that("a_freq_j with keep_levels (CHN, NGA) ", { denom = "N_col", val = c("CHN", "NGA") ) - lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% 
- split_cols_by("ARM") %>% + lyt1 <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze(vars = "COUNTRY", afun = a_freq_j, extra_args = extra_args_1) # apply to adsl diff --git a/tests/testthat/test-count_pct_relrisk.R b/tests/testthat/test-count_pct_relrisk.R index fbdaed5d..47a8d9a6 100644 --- a/tests/testthat/test-count_pct_relrisk.R +++ b/tests/testthat/test-count_pct_relrisk.R @@ -2,8 +2,8 @@ library(rtables) library(dplyr) suppressPackageStartupMessages(library(tern)) -adsl <- ex_adsl %>% select(USUBJID, ARM, COUNTRY, STRATA1, SEX) -adae <- ex_adae %>% select(USUBJID, AEDECOD, AEBODSYS, ARM) +adsl <- ex_adsl |> select(USUBJID, ARM, COUNTRY, STRATA1, SEX) +adae <- ex_adae |> select(USUBJID, AEDECOD, AEBODSYS, ARM) adae$TRTEMFL <- "Y" trtvar <- "ARM" @@ -21,10 +21,10 @@ adsl$rrisk_label <- paste(adsl[["ARM"]], "vs Placebo") adae <- left_join(adae, adsl, by = join_by(USUBJID, ARM)) -core_lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("rrisk_header", nested = FALSE) %>% +core_lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("rrisk_header", nested = FALSE) |> split_cols_by( "ARM", labels_var = "rrisk_label", @@ -96,7 +96,7 @@ test_that("a_freq_j with val = NA and denom option", { method = "wald" ) - lyt1 <- core_lyt %>% + lyt1 <- core_lyt |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -107,7 +107,7 @@ test_that("a_freq_j with val = NA and denom option", { tbl1 <- build_table(lyt1, adsl) res1 <- cell_values(tbl1["CHN", "A: Drug X"]) res1_val <- unlist(unname(res1[[DrugX_column_val]])) - res1_rr <- res1[[DrugX_column_rr]] %>% as.numeric() + res1_rr <- res1[[DrugX_column_rr]] |> as.numeric() Ncol <- count_unique_subjects(adsl_col) N <- 
count_unique_subjects(adsl_col) @@ -148,7 +148,7 @@ test_that("a_freq_j with val = NA and denom option", { method = "wald" ) - lyt1b <- core_lyt %>% + lyt1b <- core_lyt |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -157,7 +157,7 @@ test_that("a_freq_j with val = NA and denom option", { tbl1b <- build_table(lyt1b, adae, adsl) res1b <- cell_values(tbl1b["CHN", "A: Drug X"]) res1b_val <- unlist(unname(res1b[[DrugX_column_val]])) - res1b_rr <- res1b[[DrugX_column_rr]] %>% as.numeric() + res1b_rr <- res1b[[DrugX_column_rr]] |> as.numeric() ### comparison of main columns (similar to tests in test-count_pct.R ) Ncol <- count_unique_subjects(adsl_col) @@ -197,7 +197,7 @@ test_that("a_freq_j with val = NA and denom option", { method = "wald" ) - lyt1c <- core_lyt %>% + lyt1c <- core_lyt |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -207,7 +207,7 @@ test_that("a_freq_j with val = NA and denom option", { res1c <- cell_values(tbl1c["CHN", "A: Drug X"]) res1c_val <- unlist(unname(res1c[[DrugX_column_val]])) - res1c_rr <- res1c[[DrugX_column_rr]] %>% as.numeric() + res1c_rr <- res1c[[DrugX_column_rr]] |> as.numeric() ### comparison of main columns (similar to tests in test-count_pct.R ) Ncol <- count_unique_subjects(adsl_col) @@ -261,7 +261,7 @@ test_that("a_freq_j with risk difference method cmh", { ref_path = ref_path, variables = list(strata = "STRATA1") ) - lyt1d <- core_lyt %>% + lyt1d <- core_lyt |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -272,7 +272,7 @@ test_that("a_freq_j with risk difference method cmh", { res1d <- cell_values(tbl1d["CHN", "A: Drug X"]) res1d_val <- unlist(unname(res1d[[DrugX_column_val]])) - res1d_rr <- res1d[[DrugX_column_rr]] %>% as.numeric() + res1d_rr <- res1d[[DrugX_column_rr]] |> as.numeric() ### comparison of main columns (similar to tests in test-count_pct.R ) Ncol <- count_unique_subjects(adsl_col) @@ -297,9 +297,9 @@ test_that("a_freq_j with risk difference method cmh", { N_PBO <- count_unique_subjects(adae_colPBO_x) ### 
construct input vectors to utilize tern::prop_diff_cmh - adae_col_chn <- adae %>% + adae_col_chn <- adae |> filter(ARM == "A: Drug X" & COUNTRY == "CHN") - adae_colPBO_chn <- adae %>% + adae_colPBO_chn <- adae |> filter(ARM == ctrl_grp & COUNTRY == "CHN") subj_col <- adsl_col[["USUBJID"]] @@ -361,8 +361,8 @@ test_that("a_freq_j with N_subgroup as denom", { ctrl_grp = ctrl_grp, ref_path = ref_path ) - lyt1 <- core_lyt %>% - split_rows_by("SEX") %>% + lyt1 <- core_lyt |> + split_rows_by("SEX") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -383,7 +383,7 @@ test_that("a_freq_j with N_subgroup as denom", { ] ) res1_val <- unlist(unname(res1[[DrugX_column_val]])) - res1_rr <- res1[[DrugX_column_rr]] %>% as.numeric() + res1_rr <- res1[[DrugX_column_rr]] |> as.numeric() Ncol <- length(unique(adsl_col[["USUBJID"]])) Nsubgroup <- length(unique(adsl_col_subgroup[["USUBJID"]])) @@ -431,7 +431,7 @@ test_that("a_freq_j with N_subgroup as denom", { ] ) res1b_val <- unlist(unname(res1b[[DrugX_column_val]])) - res1b_rr <- res1b[[DrugX_column_rr]] %>% as.numeric() + res1b_rr <- res1b[[DrugX_column_rr]] |> as.numeric() Ncol <- length(unique(adsl_col[["USUBJID"]])) Nsubgroup <- length(unique(adsl_col_subgroup[["USUBJID"]])) @@ -479,8 +479,8 @@ test_that("a_freq_j with N_subgroup as denom", { ref_path = ref_path ) - lyt1c <- core_lyt %>% - split_rows_by("SEX") %>% + lyt1c <- core_lyt |> + split_rows_by("SEX") |> analyze( vars = "COUNTRY", afun = a_freq_j, @@ -500,7 +500,7 @@ test_that("a_freq_j with N_subgroup as denom", { ] ) res1c_val <- unlist(unname(res1c[[DrugX_column_val]])) - res1c_rr <- res1c[[DrugX_column_rr]] %>% as.numeric() + res1c_rr <- res1c[[DrugX_column_rr]] |> as.numeric() Ncol <- length(unique(adsl_col[["USUBJID"]])) Nsubgroup <- length(unique(adsl_col_subgroup[["USUBJID"]])) diff --git a/tests/testthat/test-coxph_hr.R b/tests/testthat/test-coxph_hr.R index 65423463..3c071250 100644 --- a/tests/testthat/test-coxph_hr.R +++ b/tests/testthat/test-coxph_hr.R @@ -3,11 
+3,11 @@ library(survival) test_that("s_coxph_hr works with default arguments and no stratification factors", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate(is_event = CNSR == 0) - df <- adtte_f %>% dplyr::filter(ARMCD == "ARM A") - df_ref <- adtte_f %>% dplyr::filter(ARMCD == "ARM B") + df <- adtte_f |> dplyr::filter(ARMCD == "ARM A") + df_ref <- adtte_f |> dplyr::filter(ARMCD == "ARM B") result <- s_coxph_hr( df = df, @@ -46,12 +46,12 @@ test_that("s_coxph_hr works with default arguments and no stratification factors }) test_that("a_coxph_hr works with custom arguments and stratification factors", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate(is_event = CNSR == 0) - result <- basic_table(round_type = "sas") %>% - split_cols_by(var = "ARMCD") %>% + result <- basic_table(round_type = "sas") |> + split_cols_by(var = "ARMCD") |> analyze( vars = "AVAL", afun = a_coxph_hr, @@ -69,7 +69,7 @@ test_that("a_coxph_hr works with custom arguments and stratification factors", { ref_path = c("ARMCD", "ARM A"), .stats = c("hr_ci_3d", "pvalue") ) - ) %>% + ) |> build_table(df = adtte_f) res <- expect_silent(result) @@ -77,12 +77,12 @@ test_that("a_coxph_hr works with custom arguments and stratification factors", { }) test_that("a_coxph_hr works with stratification factors for Log-Rank test", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate(is_event = CNSR == 0) - result <- basic_table(round_type = "sas") %>% - split_cols_by(var = "ARMCD") %>% + result <- basic_table(round_type = "sas") |> + split_cols_by(var = "ARMCD") |> analyze( vars = "AVAL", afun = a_coxph_hr, @@ -100,7 +100,7 @@ test_that("a_coxph_hr works with stratification factors for Log-Rank test", { ref_path = 
c("ARMCD", "ARM A"), .stats = c("hr_ci_3d", "pvalue") ) - ) %>% + ) |> build_table(df = adtte_f) res <- expect_silent(result) diff --git a/tests/testthat/test-coxreg_multivar.R b/tests/testthat/test-coxreg_multivar.R index 492ca9e9..7958e1c7 100644 --- a/tests/testthat/test-coxreg_multivar.R +++ b/tests/testthat/test-coxreg_multivar.R @@ -27,7 +27,7 @@ test_that("h_extract_coxreg_multivar works as expected", { }) test_that("tefos03_first_split_fun works as expected", { - lyt <- basic_table() %>% + lyt <- basic_table() |> split_cols_by("ID", split_fun = tefos03_first_split_fun) result <- expect_silent(build_table(lyt, DM)) expect_snapshot(col_info(result)) @@ -36,7 +36,7 @@ test_that("tefos03_first_split_fun works as expected", { test_that("tefos03_second_split_fun_fct works as expected", { split_fun <- tefos03_second_split_fun_fct(conf_level = 0.92) - lyt <- basic_table() %>% + lyt <- basic_table() |> split_cols_by("ID", split_fun = tefos03_first_split_fun) |> split_cols_by("ID", split_fun = split_fun) result <- expect_silent(build_table(lyt, DM)) diff --git a/tests/testthat/test-event_free.R b/tests/testthat/test-event_free.R index ed91897d..aa3fd105 100644 --- a/tests/testthat/test-event_free.R +++ b/tests/testthat/test-event_free.R @@ -1,6 +1,6 @@ test_that("s_event_free works with default arguments", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 @@ -19,8 +19,8 @@ test_that("s_event_free works with default arguments", { }) test_that("s_event_free works with percent format", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 @@ -40,14 +40,14 @@ test_that("s_event_free works with percent format", { }) test_that("a_event_free works with default arguments in a 
table layout", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 ) - lyt <- basic_table(round_type = "sas") %>% + lyt <- basic_table(round_type = "sas") |> split_cols_by(var = "ARMCD") for (time_point in c(3, 4, 5)) { lyt <- lyt |> @@ -72,14 +72,14 @@ test_that("a_event_free works with default arguments in a table layout", { }) test_that("a_event_free works with customized arguments in a table layout", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 ) - lyt <- basic_table(round_type = "sas") %>% + lyt <- basic_table(round_type = "sas") |> split_cols_by(var = "ARMCD") for (time_point in c(5, 1, 7)) { lyt <- lyt |> diff --git a/tests/testthat/test-junco_utils_default_stats_formats_labels.R b/tests/testthat/test-junco_utils_default_stats_formats_labels.R index 0d5a657f..7470ed24 100644 --- a/tests/testthat/test-junco_utils_default_stats_formats_labels.R +++ b/tests/testthat/test-junco_utils_default_stats_formats_labels.R @@ -3,19 +3,24 @@ # Note that these tests are minimal, given that the junco functions are merely wrappers # of the tern functions, pointing to junco defaults. 
+normalize_fun <- function(fun) { + stopifnot(is.function(fun)) + txt <- paste(deparse(body(fun)), collapse = "") + gsub("\\s+", "", txt) +} + test_that("get_stats works as expected", { res <- junco_get_stats("kaplan_meier") expect_snapshot(res) }) + test_that("get_formats_from_stats works as expected", { sts <- c("quantiles_upper", "range_with_cens_info") res <- junco_get_formats_from_stats(sts) - environment(res[["quantiles_upper"]]) <- baseenv() - environment(res[["range_with_cens_info"]]) <- baseenv() - - expect_snapshot(res) + expect_snapshot(normalize_fun(res$quantiles_upper)) + expect_snapshot(normalize_fun(res$range_with_cens_info)) }) test_that("get_labels_from_stats works as expected", { diff --git a/tests/testthat/test-kaplan_meier.R b/tests/testthat/test-kaplan_meier.R index 39d5d10e..7891ef9b 100644 --- a/tests/testthat/test-kaplan_meier.R +++ b/tests/testthat/test-kaplan_meier.R @@ -1,15 +1,15 @@ library(tern) test_that("s_kaplan_meier works with default arguments", { - adtte_f <- tern::tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern::tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 ) result <- expect_silent(s_kaplan_meier( - df = adtte_f %>% dplyr::filter(ARMCD == "ARM B"), + df = adtte_f |> dplyr::filter(ARMCD == "ARM B"), .var = "AVAL", is_event = "is_event" )) @@ -18,8 +18,8 @@ test_that("s_kaplan_meier works with default arguments", { }) test_that("s_kaplan_meier works with customized arguments", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 @@ -42,8 +42,8 @@ test_that("s_kaplan_meier works with customized arguments", { }) test_that("a_kaplan_meier works with default arguments", { - adtte_f <- tern::tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern::tern_ex_adtte |> + 
dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 @@ -61,8 +61,8 @@ test_that("a_kaplan_meier works with default arguments", { }) test_that("a_kaplan_meier works with customized arguments", { - adtte_f <- tern::tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern::tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 @@ -85,8 +85,8 @@ test_that("a_kaplan_meier works with customized arguments", { }) test_that("a_kaplan_meier works inside analyze in table", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 @@ -95,10 +95,10 @@ test_that("a_kaplan_meier works inside analyze in table", { adtte_f$AVAL == max(adtte_f$AVAL[adtte_f$ARMCD == "ARM A"]) ] <- FALSE - result <- basic_table(round_type = "sas") %>% + result <- basic_table(round_type = "sas") |> split_cols_by( var = "ARMCD" - ) %>% + ) |> analyze( vars = "AVAL", afun = a_kaplan_meier, @@ -107,7 +107,7 @@ test_that("a_kaplan_meier works inside analyze in table", { extra_args = list( is_event = "is_event" ) - ) %>% + ) |> build_table(df = adtte_f) res <- expect_silent(result) @@ -115,8 +115,8 @@ test_that("a_kaplan_meier works inside analyze in table", { }) test_that("a_kaplan_meier works inside analyze in table with custom arguments", { - adtte_f <- tern_ex_adtte %>% - dplyr::filter(PARAMCD == "OS") %>% + adtte_f <- tern_ex_adtte |> + dplyr::filter(PARAMCD == "OS") |> dplyr::mutate( AVAL = tern::day2month(AVAL), is_event = CNSR == 0 @@ -125,10 +125,10 @@ test_that("a_kaplan_meier works inside analyze in table with custom arguments", adtte_f$AVAL == max(adtte_f$AVAL[adtte_f$ARMCD == "ARM A"]) ] <- FALSE - result <- basic_table(round_type = "sas") %>% + result <- basic_table(round_type = "sas") |> split_cols_by( var = "ARMCD" - ) %>% + 
) |> analyze( vars = "AVAL", afun = a_kaplan_meier, @@ -144,7 +144,7 @@ test_that("a_kaplan_meier works inside analyze in table with custom arguments", .labels = c(range_with_cens_info = "Min and Max"), .indent_mods = c(median_ci_3d = 3L) ) - ) %>% + ) |> build_table(df = adtte_f) res <- expect_silent(result) diff --git a/tests/testthat/test-mmrm_rbmi.R b/tests/testthat/test-mmrm_rbmi.R index e7daf11d..e221ad21 100644 --- a/tests/testthat/test-mmrm_rbmi.R +++ b/tests/testthat/test-mmrm_rbmi.R @@ -1,3 +1,9 @@ +if (requireNamespace("rbmi", quietly = TRUE)) { + suppressPackageStartupMessages(library(rbmi)) +} else { + skip("rbmi package not available") +} + suppressPackageStartupMessages(library(rbmi)) f2n <- function(x) as.numeric(x) - 1 diff --git a/tests/testthat/test-patyrs-eair100.R b/tests/testthat/test-patyrs-eair100.R index 635edc4f..feaa6088 100644 --- a/tests/testthat/test-patyrs-eair100.R +++ b/tests/testthat/test-patyrs-eair100.R @@ -4,26 +4,26 @@ library(dplyr) ref_path <- c("ARM", "B: Placebo") -adsl <- ex_adsl %>% - mutate(TRTDURY = substring(USUBJID, nchar(USUBJID) - 3 + 1), "-", "") %>% - mutate(TRTDURY = sub("-", "", TRTDURY)) %>% - mutate(TRTDURY = sub("d", "", TRTDURY)) %>% - mutate(TRTDURY = as.numeric(TRTDURY)) %>% - mutate(TRTDURY2 = TRTDURY + 25) %>% +adsl <- ex_adsl |> + mutate(TRTDURY = substring(USUBJID, nchar(USUBJID) - 3 + 1), "-", "") |> + mutate(TRTDURY = sub("-", "", TRTDURY)) |> + mutate(TRTDURY = sub("d", "", TRTDURY)) |> + mutate(TRTDURY = as.numeric(TRTDURY)) |> + mutate(TRTDURY2 = TRTDURY + 25) |> select(USUBJID, ARM, COUNTRY, STRATA1, TRTDURY, TRTDURY2, SEX) -adae <- ex_adae %>% +adae <- ex_adae |> select(USUBJID, AEDECOD, AEBODSYS, ASTDY) adae$TRTEMFL <- "Y" # set up occurrence flag for first occurrence of event -adaefirst <- adae %>% - arrange(USUBJID, AEBODSYS, AEDECOD, ASTDY) %>% - group_by(USUBJID, AEBODSYS, AEDECOD) %>% - slice(1) %>% - ungroup() %>% - mutate(AOCCPFL = "Y") %>% +adaefirst <- adae |> + arrange(USUBJID, 
AEBODSYS, AEDECOD, ASTDY) |> + group_by(USUBJID, AEBODSYS, AEDECOD) |> + slice(1) |> + ungroup() |> + mutate(AOCCPFL = "Y") |> select(USUBJID, AEBODSYS, AEDECOD, ASTDY, AOCCPFL) adae <- left_join( @@ -44,13 +44,13 @@ adsl$colspan_trt <- factor( adsl$rrisk_header <- "Risk Difference (%) (95% CI)" adsl$rrisk_label <- paste(adsl[["ARM"]], "vs Placebo") -adae <- left_join(adsl, adae, by = "USUBJID") %>% +adae <- left_join(adsl, adae, by = "USUBJID") |> mutate(ASTDY2 = ASTDY + 10) -core_lyt <- basic_table(show_colcounts = FALSE, round_type = "sas") %>% - split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("rrisk_header", nested = FALSE) %>% +core_lyt <- basic_table(show_colcounts = FALSE, round_type = "sas") |> + split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("rrisk_header", nested = FALSE) |> split_cols_by( "ARM", labels_var = "rrisk_label", @@ -65,7 +65,7 @@ test_that("Check patient years numbers are giving expected result", { label = c("Subject years\u1D43") ) - lyt1 <- core_lyt %>% + lyt1 <- core_lyt |> analyze( "TRTDURY", nested = FALSE, @@ -77,10 +77,10 @@ test_that("Check patient years numbers are giving expected result", { res1 <- cell_values(tbl1[c("TRTDURY", "patyrs"), "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) - adae_sub <- adae %>% - filter(ARM == "A: Drug X") %>% - group_by(USUBJID) %>% - slice(1) %>% + adae_sub <- adae |> + filter(ARM == "A: Drug X") |> + group_by(USUBJID) |> + slice(1) |> ungroup() expected <- sum(adae_sub$TRTDURY) @@ -92,7 +92,7 @@ test_that("Check patient years numbers are giving expected result", { }) test_that("Check aeir100 numbers are giving expected result", { - lyt1 <- core_lyt %>% + lyt1 <- core_lyt |> analyze( "AEDECOD", nested = FALSE, @@ -110,28 +110,28 @@ test_that("Check aeir100 numbers are giving expected result", { res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) 
result <- as.numeric(unlist(unname(res1))[[1]]) - adae_onecode <- adae %>% - filter(AEDECOD == "dcd A.1.1.1.1" & !is.na(AOCCPFL)) %>% + adae_onecode <- adae |> + filter(AEDECOD == "dcd A.1.1.1.1" & !is.na(AOCCPFL)) |> select(USUBJID, AEDECOD, AEBODSYS, ASTDY, TRTEMFL, AOCCPFL) adae_sub <- left_join(adsl, adae_onecode, by = "USUBJID") - adae_sub <- adae_sub %>% - filter(ARM == "A: Drug X") %>% - arrange(USUBJID, AEDECOD, ASTDY) %>% - group_by(USUBJID) %>% - slice(1) %>% - ungroup() %>% + adae_sub <- adae_sub |> + filter(ARM == "A: Drug X") |> + arrange(USUBJID, AEDECOD, ASTDY) |> + group_by(USUBJID) |> + slice(1) |> + ungroup() |> mutate(EXP_TIME = if_else(!is.na(ASTDY), (ASTDY / 365.25), TRTDURY)) total_exp_years <- sum(adae_sub$EXP_TIME) - adae_sub2 <- adae %>% + adae_sub2 <- adae |> filter( AEDECOD == "dcd A.1.1.1.1" & ARM == "A: Drug X" & !is.na(AOCCPFL) - ) %>% - group_by(USUBJID) %>% - slice(1) %>% + ) |> + group_by(USUBJID) |> + slice(1) |> ungroup() number_with_event <- nrow(adae_sub2) @@ -145,7 +145,7 @@ test_that("Check aeir100 numbers are giving expected result", { }) test_that("Check aeir100 numbers are giving expected result when fup_var argument is changed", { - lyt1 <- core_lyt %>% + lyt1 <- core_lyt |> analyze( "AEDECOD", nested = FALSE, @@ -163,28 +163,28 @@ test_that("Check aeir100 numbers are giving expected result when fup_var argumen res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) - adae_onecode <- adae %>% - filter(AEDECOD == "dcd A.1.1.1.1" & !is.na(AOCCPFL)) %>% + adae_onecode <- adae |> + filter(AEDECOD == "dcd A.1.1.1.1" & !is.na(AOCCPFL)) |> select(USUBJID, AEDECOD, AEBODSYS, ASTDY, TRTEMFL, AOCCPFL) adae_sub <- left_join(adsl, adae_onecode, by = "USUBJID") - adae_sub <- adae_sub %>% - filter(ARM == "A: Drug X") %>% - arrange(USUBJID, AEDECOD, ASTDY) %>% - group_by(USUBJID) %>% - slice(1) %>% - ungroup() %>% + adae_sub <- adae_sub |> + filter(ARM == "A: Drug X") |> + arrange(USUBJID, 
AEDECOD, ASTDY) |> + group_by(USUBJID) |> + slice(1) |> + ungroup() |> mutate(EXP_TIME = if_else(!is.na(ASTDY), (ASTDY / 365.25), TRTDURY2)) total_exp_years <- sum(adae_sub$EXP_TIME) - adae_sub2 <- adae %>% + adae_sub2 <- adae |> filter( AEDECOD == "dcd A.1.1.1.1" & ARM == "A: Drug X" & !is.na(AOCCPFL) - ) %>% - group_by(USUBJID) %>% - slice(1) %>% + ) |> + group_by(USUBJID) |> + slice(1) |> ungroup() number_with_event <- nrow(adae_sub2) @@ -198,7 +198,7 @@ test_that("Check aeir100 numbers are giving expected result when fup_var argumen }) test_that("Check aeir100 numbers are giving expected result when occ_dy argument is changed", { - lyt1 <- core_lyt %>% + lyt1 <- core_lyt |> analyze( "AEDECOD", nested = FALSE, @@ -216,28 +216,28 @@ test_that("Check aeir100 numbers are giving expected result when occ_dy argument res1 <- cell_values(tbl1["dcd A.1.1.1.1", "A: Drug X"]) result <- as.numeric(unlist(unname(res1))[[1]]) - adae_onecode <- adae %>% - filter(AEDECOD == "dcd A.1.1.1.1" & !is.na(AOCCPFL)) %>% + adae_onecode <- adae |> + filter(AEDECOD == "dcd A.1.1.1.1" & !is.na(AOCCPFL)) |> select(USUBJID, AEDECOD, AEBODSYS, ASTDY2, TRTEMFL, AOCCPFL) adae_sub <- left_join(adsl, adae_onecode, by = "USUBJID") - adae_sub <- adae_sub %>% - filter(ARM == "A: Drug X") %>% - arrange(USUBJID, AEDECOD, ASTDY2) %>% - group_by(USUBJID) %>% - slice(1) %>% - ungroup() %>% + adae_sub <- adae_sub |> + filter(ARM == "A: Drug X") |> + arrange(USUBJID, AEDECOD, ASTDY2) |> + group_by(USUBJID) |> + slice(1) |> + ungroup() |> mutate(EXP_TIME = if_else(!is.na(ASTDY2), (ASTDY2 / 365.25), TRTDURY2)) total_exp_years <- sum(adae_sub$EXP_TIME) - adae_sub2 <- adae %>% + adae_sub2 <- adae |> filter( AEDECOD == "dcd A.1.1.1.1" & ARM == "A: Drug X" & !is.na(AOCCPFL) - ) %>% - group_by(USUBJID) %>% - slice(1) %>% + ) |> + group_by(USUBJID) |> + slice(1) |> ungroup() number_with_event <- nrow(adae_sub2) diff --git a/tests/testthat/test-pool_rbmi.R b/tests/testthat/test-pool_rbmi.R index e0ef1b51..6821f479 
100644 --- a/tests/testthat/test-pool_rbmi.R +++ b/tests/testthat/test-pool_rbmi.R @@ -1,3 +1,9 @@ +if (requireNamespace("rbmi", quietly = TRUE)) { + suppressPackageStartupMessages(library(rbmi)) +} else { + skip("rbmi package not available") +} + library(rbmi) suppressMessages(require(mockery)) @@ -21,13 +27,15 @@ test_that("mod_pool_internal_rubin combines results correctly", { ) } - mock_parametric_ci <- function(point, - se, - alpha, - alternative, - qfun, - pfun, - df) { + mock_parametric_ci <- function( + point, + se, + alpha, + alternative, + qfun, + pfun, + df + ) { q_val <- qfun(1 - alpha / 2, df = df) ci <- switch(alternative, "two.sided" = c(point - q_val * se, point + q_val * se), @@ -142,10 +150,12 @@ test_that("pool function processes and returns combined results", { method = list(D = 1) ) - pool_no_validate <- function(results, - conf.level = 0.95, - alternative = c("two.sided", "less", "greater"), - type = c("percentile", "normal")) { + pool_no_validate <- function( + results, + conf.level = 0.95, + alternative = c("two.sided", "less", "greater"), + type = c("percentile", "normal") + ) { # Skip validation step rbmi::validate(results) alternative <- match.arg(alternative) @@ -236,7 +246,7 @@ test_that("pool function processes and returns combined results", { expect_equal(res$pars$param2$df, median(mock_results$param2$df)) }) -test_that("rbmi_pool works fine", { +test_that("Pool (Rubin) works as expected when se = NA in analysis model", { set.seed(101) mu <- 0 @@ -249,19 +259,16 @@ test_that("rbmi_pool works fine", { list("p1" = list(est = mean(x), se = NA, df = NA)) } - results_bayes <- as_analysis( - method = method_bayes(n_samples = 5000), + results_bayes <- rbmi_as_analysis( + method = rbmi::method_bayes(n_samples = 5000), results = lapply( seq_len(5000), function(x) runanalysis(sample(vals, size = n, replace = TRUE)) ) ) - - testthat::expect_equal(class(results_bayes$results)[1], "rubin") - - bayes <- rbmi_pool(results = results_bayes) - bayes2 
<- rbmi_pool(results = results_bayes, conf.level = 0.8) - bayes3 <- rbmi_pool(results = results_bayes, conf.level = 0.8, alternative = "greater") + bayes <- rbmi_pool(results_bayes) + bayes2 <- rbmi_pool(results_bayes, conf.level = 0.8) + bayes3 <- rbmi_pool(results_bayes, alternative = "greater") expect_equal( bayes$pars$p1, @@ -270,7 +277,7 @@ test_that("rbmi_pool works fine", { ci = as.numeric(c(NA, NA)), se = as.numeric(NA), pvalue = as.numeric(NA), - df = as.logical(NA) + df = NA ), tolerance = 1e-2 ) @@ -282,7 +289,7 @@ test_that("rbmi_pool works fine", { ci = as.numeric(c(NA, NA)), se = as.numeric(NA), pvalue = as.numeric(NA), - df = as.logical(NA) + df = NA ), tolerance = 1e-2 ) @@ -294,7 +301,7 @@ test_that("rbmi_pool works fine", { ci = as.numeric(c(NA, NA)), se = as.numeric(NA), pvalue = as.numeric(NA), - df = as.logical(NA) + df = NA ), tolerance = 1e-2 ) @@ -303,8 +310,8 @@ test_that("rbmi_pool works fine", { list("p1" = list(est = mean(x), se = NA, df = Inf)) } - results_bayes <- as_analysis( - method = method_bayes(n_samples = 5000), + results_bayes <- rbmi_as_analysis( + method = rbmi::method_bayes(n_samples = 5000), results = lapply( seq_len(5000), function(x) runanalysis(sample(vals, size = n, replace = TRUE)) @@ -321,7 +328,7 @@ test_that("rbmi_pool works fine", { ci = as.numeric(c(NA, NA)), se = as.numeric(NA), pvalue = as.numeric(NA), - df = as.logical(NA) + df = NA ), tolerance = 1e-2 ) @@ -333,7 +340,7 @@ test_that("rbmi_pool works fine", { ci = as.numeric(c(NA, NA)), se = as.numeric(NA), pvalue = as.numeric(NA), - df = as.logical(NA) + df = NA ), tolerance = 1e-2 ) @@ -345,7 +352,7 @@ test_that("rbmi_pool works fine", { ci = as.numeric(c(NA, NA)), se = as.numeric(NA), pvalue = as.numeric(NA), - df = as.logical(NA) + df = NA ), tolerance = 1e-2 ) diff --git a/tests/testthat/test-proportions.R b/tests/testthat/test-proportions.R index 99db73ef..f5897d17 100644 --- a/tests/testthat/test-proportions.R +++ b/tests/testthat/test-proportions.R @@ 
-103,7 +103,7 @@ test_that("a_proportion_ci_factor works as expected", { }) test_that("prop_split_fun works as expected", { - result <- basic_table(round_type = "sas") %>% + result <- basic_table(round_type = "sas") |> split_cols_by("ID", split_fun = prop_split_fun) |> build_table(formatters::DM) expect_snapshot(result) diff --git a/tests/testthat/test-pruning_functions.R b/tests/testthat/test-pruning_functions.R index 203f5ccf..ba8271ed 100644 --- a/tests/testthat/test-pruning_functions.R +++ b/tests/testthat/test-pruning_functions.R @@ -2,12 +2,12 @@ library(dplyr) library(tern) # Pre-processing the table -tab <- basic_table(round_type = "sas") %>% - split_cols_by("ARM") %>% - split_rows_by("RACE") %>% - split_rows_by("STRATA1") %>% - summarize_row_groups() %>% - analyze_vars("COUNTRY", .stats = "count_fraction") %>% +tab <- basic_table(round_type = "sas") |> + split_cols_by("ARM") |> + split_rows_by("RACE") |> + split_rows_by("STRATA1") |> + summarize_row_groups() |> + analyze_vars("COUNTRY", .stats = "count_fraction") |> build_table(formatters::DM) trtvar <- "ARM" @@ -125,19 +125,19 @@ testthat::test_that("test keep_non_null_rows", { rcell(NULL, label = "") } - tabsx <- basic_table(round_type = "sas") %>% - split_cols_by("ARM") %>% - split_rows_by("ARM") %>% - analyze("ARM", afun = xnull_cell_fn, show_labels = "hidden") %>% - analyze("STRATA1", show_labels = "hidden") %>% + tabsx <- basic_table(round_type = "sas") |> + split_cols_by("ARM") |> + split_rows_by("ARM") |> + analyze("ARM", afun = xnull_cell_fn, show_labels = "hidden") |> + analyze("STRATA1", show_labels = "hidden") |> build_table(formatters::DM) result <- prune_table(tabsx, keep_rows(keep_non_null_rows)) - tabsx2 <- basic_table(round_type = "sas") %>% - split_cols_by("ARM") %>% - split_rows_by("ARM") %>% - analyze("STRATA1") %>% + tabsx2 <- basic_table(round_type = "sas") |> + split_cols_by("ARM") |> + split_rows_by("ARM") |> + analyze("STRATA1") |> build_table(formatters::DM) expected <- tabsx2 @@ 
-174,9 +174,9 @@ testthat::test_that("bspt_pruner both fraction and diff_from_control are NULL", }) testthat::test_that("bspt_pruner with fraction", { - tab_bspt_pruner <- basic_table(round_type = "sas") %>% - split_cols_by("ARM") %>% - analyze_vars("COUNTRY", .stats = "count_fraction") %>% + tab_bspt_pruner <- basic_table(round_type = "sas") |> + split_cols_by("ARM") |> + analyze_vars("COUNTRY", .stats = "count_fraction") |> build_table(formatters::DM) result <- prune_table( @@ -194,9 +194,9 @@ testthat::test_that("bspt_pruner with fraction", { }) testthat::test_that("bspt_pruner with fraction and diff_from_control", { - tab_bspt_pruner <- basic_table(round_type = "sas") %>% - split_cols_by("ARM") %>% - analyze_vars("COUNTRY", .stats = "count_fraction") %>% + tab_bspt_pruner <- basic_table(round_type = "sas") |> + split_cols_by("ARM") |> + analyze_vars("COUNTRY", .stats = "count_fraction") |> build_table(formatters::DM) result <- prune_table( @@ -216,9 +216,9 @@ testthat::test_that("bspt_pruner with fraction and diff_from_control", { }) testthat::test_that("bspt_pruner with fraction and diff_from_control and keeprowtext", { - tab_bspt_pruner <- basic_table(round_type = "sas") %>% - split_cols_by("ARM") %>% - analyze_vars("COUNTRY", .stats = "count_fraction") %>% + tab_bspt_pruner <- basic_table(round_type = "sas") |> + split_cols_by("ARM") |> + analyze_vars("COUNTRY", .stats = "count_fraction") |> build_table(formatters::DM) result1 <- prune_table( @@ -253,8 +253,8 @@ testthat::test_that("bspt_pruner with fraction and diff_from_control and keeprow }) testthat::test_that("count_pruner in small groups", { - DM_sub <- subset(DM, COUNTRY %in% c("USA", "CAN")) %>% - mutate(COUNTRY = factor(as.character(COUNTRY))) %>% + DM_sub <- subset(DM, COUNTRY %in% c("USA", "CAN")) |> + mutate(COUNTRY = factor(as.character(COUNTRY))) |> mutate( colspan_trt = factor( ifelse(ARM == "B: Placebo", " ", "Active Study Agent"), @@ -270,16 +270,16 @@ testthat::test_that("count_pruner in 
small groups", { ref_path = ref_path ) - tab_bspt_pruner <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("rrisk_header", nested = FALSE) %>% + tab_bspt_pruner <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("rrisk_header", nested = FALSE) |> split_cols_by( "ARM", labels_var = "rrisk_label", split_fun = remove_split_levels("B: Placebo") - ) %>% - analyze("COUNTRY", afun = a_freq_j, extra_args = extra_args) %>% + ) |> + analyze("COUNTRY", afun = a_freq_j, extra_args = extra_args) |> build_table(DM_sub) result <- prune_table( @@ -298,7 +298,7 @@ testthat::test_that("bspt_pruner in AE like tables", { my_adsl <- bind_rows( as_tibble(cbind(ARM = rep("Group A", 100), USUBJID = paste0("A", 1:100))), as_tibble(cbind(ARM = rep("Group B", 100), USUBJID = paste0("B", 1:100))) - ) %>% + ) |> mutate(ARM = factor(ARM)) my_adsl$colspan <- factor( @@ -316,51 +316,51 @@ testthat::test_that("bspt_pruner in AE like tables", { my_adae <- bind_rows( - my_adsl %>% - filter(USUBJID %in% c(paste0("A", 1:4), paste0("B", 1:2))) %>% + my_adsl |> + filter(USUBJID %in% c(paste0("A", 1:4), paste0("B", 1:2))) |> mutate( AEBODSYS = "BODSYS1", AEDECOD = "Decod 1" ), - my_adsl %>% - filter(USUBJID %in% c(paste0("A", 5:6), paste0("B", 3:6))) %>% + my_adsl |> + filter(USUBJID %in% c(paste0("A", 5:6), paste0("B", 3:6))) |> mutate( AEBODSYS = "BODSYS1", AEDECOD = "Decod 2" ), - my_adsl %>% - filter(USUBJID %in% c(paste0("A", 1:6), paste0("B", 1:4))) %>% + my_adsl |> + filter(USUBJID %in% c(paste0("A", 1:6), paste0("B", 1:4))) |> mutate( AEBODSYS = "BODSYS2", AEDECOD = "Decod 3" ), - my_adsl %>% - filter(USUBJID %in% c(paste0("A", 1:6), paste0("B", 1:4))) %>% + my_adsl |> + filter(USUBJID %in% c(paste0("A", 1:6), paste0("B", 1:4))) |> mutate( 
AEBODSYS = "BODSYS2", AEDECOD = "Decod 4" ), - my_adsl %>% - filter(USUBJID %in% c(paste0("A", 1:6), paste0("B", 1:2))) %>% + my_adsl |> + filter(USUBJID %in% c(paste0("A", 1:6), paste0("B", 1:2))) |> mutate( AEBODSYS = "BODSYS3", AEDECOD = "Decod 5" ), - my_adsl %>% - filter(USUBJID %in% c(paste0("A", 1:2), paste0("B", 1:6))) %>% + my_adsl |> + filter(USUBJID %in% c(paste0("A", 1:2), paste0("B", 1:6))) |> mutate( AEBODSYS = "BODSYS3", AEDECOD = "Decod 6" ), - my_adsl %>% - filter(USUBJID %in% c(paste0("A", 1:8), paste0("B", 1:8))) %>% + my_adsl |> + filter(USUBJID %in% c(paste0("A", 1:8), paste0("B", 1:8))) |> mutate( AEBODSYS = "BODSYS3", AEDECOD = "Decod 7" ) - ) %>% - mutate(AEBODSYS = factor(AEBODSYS)) %>% - mutate(AEDECOD = factor(AEDECOD)) %>% + ) |> + mutate(AEBODSYS = factor(AEBODSYS)) |> + mutate(AEDECOD = factor(AEDECOD)) |> mutate(TRTEMFL = "Y") extra_args_rr <- list( @@ -369,39 +369,39 @@ testthat::test_that("bspt_pruner in AE like tables", { .stats = "count_unique_fraction" ) - tbl1 <- basic_table(show_colcounts = TRUE, top_level_section_div = " ", round_type = "sas") %>% - split_cols_by("colspan", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("rrisk_header", nested = FALSE) %>% + tbl1 <- basic_table(show_colcounts = TRUE, top_level_section_div = " ", round_type = "sas") |> + split_cols_by("colspan", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("rrisk_header", nested = FALSE) |> split_cols_by( "ARM", labels_var = "rrisk_label", split_fun = remove_split_levels("Group B") - ) %>% + ) |> analyze( "TRTEMFL", afun = a_freq_j, show_labels = "hidden", extra_args = append(extra_args_rr, list(label = "Subjects with >=1 AE")) - ) %>% + ) |> split_rows_by( "AEBODSYS", split_fun = trim_levels_in_group("AEDECOD"), section_div = c(" "), nested = FALSE - ) %>% + ) |> summarize_row_groups( "AEBODSYS", cfun = a_freq_j, extra_args = extra_args_rr - ) %>% + ) |> analyze( vars = "AEDECOD", 
afun = a_freq_j, indent_mod = 1L, show_labels = "hidden", extra_args = extra_args_rr - ) %>% + ) |> build_table(my_adae, my_adsl) result1 <- safe_prune_table( @@ -453,14 +453,14 @@ testthat::test_that("bspt_pruner in AE like tables", { testthat::test_that("bspt_pruner with less obvious control specifications", { - DM_sub <- formatters::DM %>% - mutate(COUNTRY = factor(as.character(COUNTRY))) %>% + DM_sub <- formatters::DM |> + mutate(COUNTRY = factor(as.character(COUNTRY))) |> mutate(SEX = factor(as.character(SEX))) - tab_bspt_pruner <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% - split_cols_by("SEX") %>% - analyze_vars("COUNTRY", .stats = "count_fraction") %>% + tab_bspt_pruner <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> + split_cols_by("SEX") |> + analyze_vars("COUNTRY", .stats = "count_fraction") |> build_table(DM_sub) rps_label <- make_row_df(tab_bspt_pruner)$label @@ -510,12 +510,12 @@ testthat::test_that("bspt_pruner with less obvious control specifications", { }) #### Tests for safe_prune_table function#### -my_DM <- formatters::DM %>% +my_DM <- formatters::DM |> filter(RACE == "THIS LEAVES EMPTY DF") -my_tab <- basic_table(round_type = "sas") %>% - split_cols_by("ARM") %>% - analyze("AGE") %>% +my_tab <- basic_table(round_type = "sas") |> + split_cols_by("ARM") |> + analyze("AGE") |> build_table(my_DM) testthat::test_that("check that if all data is pruned leaving no rows, the outcome is the message", { diff --git a/tests/testthat/test-rbmi.R b/tests/testthat/test-rbmi.R index f3e656ee..dab46ac5 100644 --- a/tests/testthat/test-rbmi.R +++ b/tests/testthat/test-rbmi.R @@ -1,9 +1,15 @@ suppressPackageStartupMessages({ library(testthat) - library(rbmi) library(dplyr) }) +if (requireNamespace("rbmi", quietly = TRUE)) { + suppressPackageStartupMessages(library(rbmi)) +} else { + skip("rbmi package not available") +} + + test_that("find_missing_chg_after_avisit works as 
expected", { df <- data.frame( AVISIT = factor(c(1, 2, 3, 4, 5)), @@ -207,27 +213,31 @@ test_that("make_rbmi_cluster loads rbmi namespaces correctly", { } }) -test_that("Parallisation works with rbmi_analyse and produces identical results", { +test_that("Parallelisation works with rbmi_analyse and produces identical results", { set.seed(4642) - dat <- rbmi::get_example_data() - n <- nrow(dat) - dat$age <- rnorm(n) - dat$sex <- factor( - sample(c("M", "F"), size = n, replace = TRUE), - levels = c("M", "F") + sigma <- as_vcov( + c(2, 1, 0.7, 1.5), + c(0.5, 0.3, 0.2, 0.3, 0.5, 0.4) ) - dat <- dat[, c("id", "visit", "outcome", "age", "group", "sex")] - - dat_ice <- dat %>% - group_by(id) %>% - arrange(id, visit) %>% - filter(is.na(outcome)) %>% - slice(1) %>% - ungroup() %>% - select(id, visit) %>% - mutate(strategy = "JR") - - vars <- set_vars( + dat <- get_sim_data(200, sigma, trt = 8) |> + dplyr::mutate( + outcome = dplyr::if_else( + rbinom(dplyr::n(), 1, 0.3) == 1 & group == "A", + NA_real_, + outcome + ) + ) + + dat_ice <- dat |> + dplyr::group_by(id) |> + dplyr::arrange(id, visit) |> + dplyr::filter(is.na(outcome)) |> + dplyr::slice(1) |> + dplyr::ungroup() |> + dplyr::select(id, visit) |> + dplyr::mutate(strategy = "JR") + + vars <- rbmi::set_vars( outcome = "outcome", group = "group", strategy = "strategy", @@ -236,17 +246,18 @@ test_that("Parallisation works with rbmi_analyse and produces identical results" covariates = c("age", "sex", "visit * group") ) - drawobj <- draws( + set.seed(984) + drawobj <- rbmi::draws( data = dat, data_ice = dat_ice, vars = vars, - method = method_condmean(n_samples = 6, type = "bootstrap"), + method = rbmi::method_condmean(n_samples = 6, type = "bootstrap"), quiet = TRUE ) - imputeobj <- impute( + imputeobj <- rbmi::impute( draws = drawobj, - references = c("Intervention" = "Control", "Control" = "Control") + references = c("A" = "B", "B" = "B") ) # @@ -256,32 +267,44 @@ test_that("Parallisation works with rbmi_analyse and 
produces identical results" ### Delta 1 - dat_delta_1 <- delta_template(imputations = imputeobj) %>% - mutate(delta = is_missing * 5) + dat_delta_1 <- rbmi::delta_template(imputations = imputeobj) |> + dplyr::mutate(delta = is_missing * 5) vars2 <- vars vars2$covariates <- c("age", "sex") anaobj_d1_t1 <- rbmi_analyse( imputeobj, - fun = rbmi::ancova, + fun = rbmi_ancova, vars = vars2, delta = dat_delta_1 ) anaobj_d1_t2 <- rbmi_analyse( imputeobj, - fun = rbmi::ancova, + fun = rbmi_ancova, vars = vars2, delta = dat_delta_1, cluster_or_cores = 2 ) - skip_on_cran() - cl <- make_rbmi_cluster(2) + var <- 20 + inner_fun <- function(...) { + x <- as_factor(var) # forcats::as_factor + rbmi_ancova(...) + } + outer_fun <- function(...) { + inner_fun(...) + } + + cl <- make_rbmi_cluster( + 2, + objects = list(var = var, inner_fun = inner_fun), + "forcats" + ) anaobj_d1_t3 <- rbmi_analyse( imputeobj, - fun = rbmi::ancova, + fun = rbmi_ancova, vars = vars2, delta = dat_delta_1, cluster_or_cores = cl @@ -289,18 +312,18 @@ test_that("Parallisation works with rbmi_analyse and produces identical results" ### Delta 2 - dat_delta_2 <- delta_template(imputations = imputeobj) %>% - mutate(delta = is_missing * 50) + dat_delta_2 <- rbmi::delta_template(imputations = imputeobj) |> + dplyr::mutate(delta = is_missing * 50) anaobj_d2_t1 <- rbmi_analyse( imputeobj, - fun = rbmi::ancova, + fun = rbmi_ancova, vars = vars2, delta = dat_delta_2 ) anaobj_d2_t3 <- rbmi_analyse( imputeobj, - fun = rbmi::ancova, + fun = rbmi_ancova, vars = vars2, delta = dat_delta_2, cluster_or_cores = cl @@ -310,12 +333,12 @@ test_that("Parallisation works with rbmi_analyse and produces identical results" anaobj_d3_t1 <- rbmi_analyse( imputeobj, - fun = rbmi::ancova, + fun = rbmi_ancova, vars = vars2 ) anaobj_d3_t3 <- rbmi_analyse( imputeobj, - fun = rbmi::ancova, + fun = rbmi_ancova, vars = vars2, cluster_or_cores = cl ) diff --git a/tests/testthat/test-remove_col_count.R b/tests/testthat/test-remove_col_count.R 
index 6b79b2fd..9cea9dce 100644 --- a/tests/testthat/test-remove_col_count.R +++ b/tests/testthat/test-remove_col_count.R @@ -14,10 +14,10 @@ testthat::test_that("remove_col_count works", { show_colcounts = TRUE, colcount_format = "N=xx", round_type = "sas" - ) %>% - split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("set2", nested = FALSE) %>% + ) |> + split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("set2", nested = FALSE) |> split_cols_by("ARM", split_fun = remove_split_levels("B: Placebo")) tbl <- build_table(lyt, adsl) diff --git a/tests/testthat/test-resp01_functions.R b/tests/testthat/test-resp01_functions.R index 4fbad223..c38473e2 100644 --- a/tests/testthat/test-resp01_functions.R +++ b/tests/testthat/test-resp01_functions.R @@ -5,8 +5,8 @@ test_that("resp01_split_fun_fct 1 works as expected", { method = "or_cmh", conf_level = 0.95 ) - result <- basic_table(round_type = "sas") %>% - split_cols_by("ARM", split_fun = add_overall_level("Overall")) %>% + result <- basic_table(round_type = "sas") |> + split_cols_by("ARM", split_fun = add_overall_level("Overall")) |> split_cols_by("ID", split_fun = split_fun) |> build_table(formatters::DM) expect_snapshot(result) @@ -17,8 +17,8 @@ test_that("resp01_split_fun_fct 2 works as expected", { method = "rr", conf_level = 0.92 ) - result <- basic_table(round_type = "sas") %>% - split_cols_by("ARM", split_fun = add_overall_level("Overall")) %>% + result <- basic_table(round_type = "sas") |> + split_cols_by("ARM", split_fun = add_overall_level("Overall")) |> split_cols_by("ID", split_fun = split_fun) |> build_table(formatters::DM) expect_snapshot(col_info(result)) diff --git a/tests/testthat/test-response_by_var.R b/tests/testthat/test-response_by_var.R index 99fb43d3..4682664c 100644 --- a/tests/testthat/test-response_by_var.R +++ b/tests/testthat/test-response_by_var.R @@ -6,18 +6,18 @@ 
adsl <- ex_adsl adae <- ex_adae adae$TRTEMFL <- "Y" -had_ae <- adae %>% - filter(TRTEMFL == "Y") %>% - select(USUBJID, TRTEMFL) %>% +had_ae <- adae |> + filter(TRTEMFL == "Y") |> + select(USUBJID, TRTEMFL) |> distinct(USUBJID, .keep_all = TRUE) -adsl <- adsl %>% - left_join(had_ae, by = "USUBJID") %>% +adsl <- adsl |> + left_join(had_ae, by = "USUBJID") |> mutate(TRTEMFL = ifelse(is.na(TRTEMFL), "N", "Y")) test_that("response_by_var various scenarios", { - lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE, round_type = "sas") |> + split_cols_by("ARM") |> analyze( vars = "SEX", var_labels = "Sex, n/Ns (%)", @@ -51,7 +51,7 @@ test_that("response_by_var various scenarios", { expect_snapshot(tbl2) ## Scenario 3: TRTEMFL has missing values and Y only, and analysis variable has missing values - adsl3 <- adsl %>% select(USUBJID, ARM, SEX, TRTEMFL) + adsl3 <- adsl |> select(USUBJID, ARM, SEX, TRTEMFL) adsl3$TRTEMFL <- ifelse(adsl3$TRTEMFL == "Y", "Y", NA) adsl3$TRTEMFL <- factor(adsl3$TRTEMFL, levels = "Y") adsl3$SEX[1:10] <- NA_character_ @@ -71,7 +71,7 @@ test_that("response_by_var various scenarios", { expect_snapshot(tbl3) ## Scenario 4: TRTEMFL has missing values and Y/N, and analysis variable has missing values - adsl4 <- adsl %>% select(USUBJID, ARM, SEX, TRTEMFL) + adsl4 <- adsl |> select(USUBJID, ARM, SEX, TRTEMFL) adsl4$SEX[1:10] <- NA_character_ adsl4$TRTEMFL[8:15] <- NA_character_ @@ -90,7 +90,7 @@ test_that("response_by_var various scenarios", { expect_snapshot(tbl4) ## Scenario 5: Analysis variable has a level not observed in data - adsl5 <- adsl %>% select(USUBJID, ARM, SEX, TRTEMFL) + adsl5 <- adsl |> select(USUBJID, ARM, SEX, TRTEMFL) adsl5$SEX <- factor( as.character(adsl5$SEX), levels = c(levels(adsl5$SEX), "extra level") diff --git a/tests/testthat/test-sorting_functions.R b/tests/testthat/test-sorting_functions.R index a3dd9cfe..bb355f0e 100644 --- 
a/tests/testthat/test-sorting_functions.R +++ b/tests/testthat/test-sorting_functions.R @@ -10,12 +10,12 @@ DM2$spanhead <- factor( levels = c("This is a Spanning Header", " ") ) -tab <- basic_table(round_type = "sas") %>% - split_cols_by("spanhead", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_rows_by("STRATA1") %>% - summarize_row_groups() %>% - analyze_vars("COUNTRY", .stats = "count_fraction") %>% +tab <- basic_table(round_type = "sas") |> + split_cols_by("spanhead", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_rows_by("STRATA1") |> + summarize_row_groups() |> + analyze_vars("COUNTRY", .stats = "count_fraction") |> build_table(DM2) #### Tests for jj_complex_scorefun function #### @@ -131,13 +131,13 @@ testthat::test_that("jj_complex_scorefun places specified category at the end: l testthat::expect_identical(result, expected) }) -tab2 <- basic_table(round_type = "sas") %>% - split_cols_by("spanhead", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("RACE") %>% - split_rows_by("STRATA1") %>% - summarize_row_groups() %>% - analyze_vars("COUNTRY", .stats = "count_fraction") %>% +tab2 <- basic_table(round_type = "sas") |> + split_cols_by("spanhead", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("RACE") |> + split_rows_by("STRATA1") |> + summarize_row_groups() |> + analyze_vars("COUNTRY", .stats = "count_fraction") |> build_table(DM2) testthat::test_that("jj_complex_scorefun uses first column to sort: usefirstcol", { diff --git a/tests/testthat/test-split_functions.R b/tests/testthat/test-split_functions.R index 2c22b465..e4c99c35 100644 --- a/tests/testthat/test-split_functions.R +++ b/tests/testthat/test-split_functions.R @@ -27,8 +27,8 @@ testthat::test_that("cond_rm_facets works", { show_colcounts = TRUE, colcount_format = "N=xx", round_type = "sas" - ) %>% - split_cols_by("colspan_trt", split_fun = 
trim_levels_in_group("ARM")) %>% + ) |> + split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) |> split_cols_by("ARM", split_fun = mysplit) tbl <- build_table(lyt, adsl) @@ -56,8 +56,8 @@ testthat::test_that("rm_levels works", { pre = list(rm_levels(excl = c("JPN", "USA", "NGA"))) ) - lyt <- basic_table(round_type = "sas") %>% - split_rows_by("COUNTRY", split_fun = split_fun) %>% + lyt <- basic_table(round_type = "sas") |> + split_rows_by("COUNTRY", split_fun = split_fun) |> summarize_row_groups() # for simplicity tbl <- build_table(lyt, adsl) @@ -77,8 +77,8 @@ testthat::test_that("real_add_overall_facet works", { post = list(real_add_overall_facet("Overall", "Overall")) ) - lyt <- basic_table(round_type = "sas") %>% - split_rows_by("COUNTRY", split_fun = split_fun) %>% + lyt <- basic_table(round_type = "sas") |> + split_rows_by("COUNTRY", split_fun = split_fun) |> summarize_row_groups() # for simplicity tbl <- build_table(lyt, adsl) @@ -101,8 +101,8 @@ testthat::test_that("make_combo_splitfun works", { levels = c("USA", "CAN") ) - lyt <- basic_table(round_type = "sas") %>% - split_rows_by("COUNTRY", split_fun = split_fun) %>% + lyt <- basic_table(round_type = "sas") |> + split_rows_by("COUNTRY", split_fun = split_fun) |> summarize_row_groups() # for simplicity tbl <- build_table(lyt, adsl) @@ -138,8 +138,8 @@ testthat::test_that("combine_nonblank works", { split_fun <- make_split_fun(post = list(combine_nonblank("Overall", "Overall"))) - lyt <- basic_table(round_type = "sas") %>% - split_rows_by("COUNTRY", split_fun = split_fun) %>% + lyt <- basic_table(round_type = "sas") |> + split_rows_by("COUNTRY", split_fun = split_fun) |> summarize_row_groups() # for simplicity tbl <- build_table(lyt, adsl) @@ -173,14 +173,14 @@ testthat::test_that("rm_blank_levels works", { pre = list(rm_blank_levels) ) - lyt <- basic_table(round_type = "sas") %>% - split_rows_by("COUNTRY") %>% + lyt <- basic_table(round_type = "sas") |> + split_rows_by("COUNTRY") |> 
summarize_row_groups() tbl <- build_table(lyt, adsl) row_names_before <- rtables::row.names(tbl) - lyt <- basic_table(round_type = "sas") %>% - split_rows_by("COUNTRY", split_fun = split_fun) %>% + lyt <- basic_table(round_type = "sas") |> + split_rows_by("COUNTRY", split_fun = split_fun) |> summarize_row_groups() tbl <- build_table(lyt, adsl) row_names_after <- rtables::row.names(tbl) diff --git a/tests/testthat/test-summarize_ancova.R b/tests/testthat/test-summarize_ancova.R index a078c8dd..3256783e 100644 --- a/tests/testthat/test-summarize_ancova.R +++ b/tests/testthat/test-summarize_ancova.R @@ -143,9 +143,9 @@ test_that("s_summarize_ancova works as expected", { }) test_that("a_summarize_ancova_j works as expected in table layout", { - result <- basic_table(round_type = "sas") %>% - split_cols_by("Species") %>% - add_colcounts() %>% + result <- basic_table(round_type = "sas") |> + split_cols_by("Species") |> + add_colcounts() |> analyze( vars = "Petal.Length", afun = a_summarize_ancova_j, @@ -167,7 +167,7 @@ test_that("a_summarize_ancova_j works as expected in table layout", { "pval" ) ) - ) %>% + ) |> analyze( vars = "Petal.Length", afun = a_summarize_ancova_j, @@ -187,7 +187,7 @@ test_that("a_summarize_ancova_j works as expected in table layout", { "pval" ) ) - ) %>% + ) |> build_table(iris) expect_snapshot(result) }) diff --git a/tests/testthat/test-summarize_row_counts.R b/tests/testthat/test-summarize_row_counts.R index 7f540a5a..e2ae1c9f 100644 --- a/tests/testthat/test-summarize_row_counts.R +++ b/tests/testthat/test-summarize_row_counts.R @@ -1,11 +1,11 @@ library(rtables) test_that("summarize_row_counts works as expected without alt counts", { - lyt <- basic_table() %>% - split_cols_by("ARM") %>% - add_colcounts() %>% - split_rows_by("RACE", split_fun = drop_split_levels) %>% - summarize_row_counts(label_fstr = "Race: %s", alt_counts = FALSE) %>% + lyt <- basic_table() |> + split_cols_by("ARM") |> + add_colcounts() |> + split_rows_by("RACE", split_fun 
= drop_split_levels) |> + summarize_row_counts(label_fstr = "Race: %s", alt_counts = FALSE) |> analyze("AGE", afun = list_wrap_x(summary), format = "xx.xx") res <- expect_silent(build_table(lyt, formatters::DM)) @@ -18,11 +18,11 @@ test_that("summarize_row_counts works as expected without alt counts", { }) test_that("summarize_row_counts works as expected with alt counts", { - lyt <- basic_table() %>% - split_cols_by("ARM") %>% - add_colcounts() %>% - split_rows_by("RACE", split_fun = drop_split_levels) %>% - summarize_row_counts(label_fstr = "RACE value - %s") %>% + lyt <- basic_table() |> + split_cols_by("ARM") |> + add_colcounts() |> + split_rows_by("RACE", split_fun = drop_split_levels) |> + summarize_row_counts(label_fstr = "RACE value - %s") |> analyze("AGE", afun = list_wrap_x(summary), format = "xx.xx") res <- expect_silent(build_table( diff --git a/tests/testthat/test-tabulate_lsmeans.R b/tests/testthat/test-tabulate_lsmeans.R index 4a732ae4..6f9116c6 100644 --- a/tests/testthat/test-tabulate_lsmeans.R +++ b/tests/testthat/test-tabulate_lsmeans.R @@ -68,12 +68,12 @@ test_that("summarize_lsmeans can show two- and one-sided p-values correctly", { names(df) ) - dat_adsl <- mmrm::fev_data %>% - select(USUBJID, ARMCD) %>% + dat_adsl <- mmrm::fev_data |> + select(USUBJID, ARMCD) |> unique() - start_lyt <- basic_table() %>% - split_cols_by("ARMCD") %>% - add_colcounts() %>% + start_lyt <- basic_table() |> + split_cols_by("ARMCD") |> + add_colcounts() |> split_rows_by("AVISIT") lyt_two_sided <- start_lyt |> diff --git a/tests/testthat/test-tabulate_rbmi.R b/tests/testthat/test-tabulate_rbmi.R index 81db9f7a..7fcd7c2e 100644 --- a/tests/testthat/test-tabulate_rbmi.R +++ b/tests/testthat/test-tabulate_rbmi.R @@ -1,5 +1,11 @@ library(broom) +if (requireNamespace("rbmi", quietly = TRUE)) { + suppressPackageStartupMessages(library(rbmi)) +} else { + skip("rbmi package not available") +} + # rbmi_test_data ---- rbmi_test_data_ancova <- structure( @@ -284,9 +290,9 @@ 
test_that("s_rbmi_lsmeans also works with show_relative = increase", { test_that("a_rbmi_lsmeans works as expected in table layout", { df <- tidy(rbmi_test_data_ancova, visits = c("4", "5")) - lyt <- basic_table() %>% - split_cols_by("group") %>% - split_rows_by("visit", split_label = "Visit", label_pos = "topleft") %>% + lyt <- basic_table() |> + split_cols_by("group") |> + split_rows_by("visit", split_label = "Visit", label_pos = "topleft") |> analyze( "group", afun = a_rbmi_lsmeans, diff --git a/tests/testthat/test-tern_utils_default_stats_formats_labels.R b/tests/testthat/test-tern_utils_default_stats_formats_labels.R index 33f7408c..dd7ee94f 100644 --- a/tests/testthat/test-tern_utils_default_stats_formats_labels.R +++ b/tests/testthat/test-tern_utils_default_stats_formats_labels.R @@ -176,7 +176,7 @@ testthat::test_that("tern_get_labels_from_stats works as expected", { names(stats_to_do), labels_in = c(stats_to_do, "catch_me" = "xx") ), - stats_to_do %>% as.list() + stats_to_do |> as.list() ) }) @@ -206,7 +206,7 @@ testthat::test_that("tern_get_labels_from_stats with labels in works when adding "count_fraction.a" = "any A", "count_fraction.b" = "CF: B", "count_fraction.c" = "Lvl c:" - ) %>% + ) |> as.list() ) }) @@ -268,7 +268,7 @@ testthat::test_that("tern_get_indents_from_stats works as expected", { c(names(stats_to_do), "n"), indents_in = stats_to_do ), - c(stats_to_do, n = 0L) %>% as.list() + c(stats_to_do, n = 0L) |> as.list() ) }) diff --git a/tests/testthat/test-test_proportion_diff.R b/tests/testthat/test-test_proportion_diff.R index d5a3e5b5..fbe18cd7 100644 --- a/tests/testthat/test-test_proportion_diff.R +++ b/tests/testthat/test-test_proportion_diff.R @@ -45,8 +45,8 @@ test_that("a_test_proportion_diff works as expected in table layout", { strata = factor(rep(c("V", "W", "X", "Y", "Z"), each = 20)) ) - l <- basic_table() %>% - split_cols_by(var = "grp") %>% + l <- basic_table() |> + split_cols_by(var = "grp") |> analyze( vars = "rsp", afun = 
a_test_proportion_diff, diff --git a/tests/testthat/test-tt_to_tblfile.R b/tests/testthat/test-tt_to_tblfile.R index cebbc81f..4e15104b 100644 --- a/tests/testthat/test-tt_to_tblfile.R +++ b/tests/testthat/test-tt_to_tblfile.R @@ -121,8 +121,8 @@ test_that("tt_to_tlgrtf works with argument combined_rtf = TRUE", { test_that("tt_to_tlgrtf converts table tree to tlg without error", { # Create a simple table for testing - lyt <- basic_table() %>% - split_cols_by("ARM") %>% + lyt <- basic_table() |> + split_cols_by("ARM") |> analyze("AGE") tbl <- build_table(lyt, ex_adsl) @@ -142,9 +142,21 @@ test_that("tt_to_tlgrtf converts table tree to tlg without error", { badtbl <- build_table(badlyt, ex_adsl) - ## this test is largely meaningless because it doesn't get caught - ## when calling tt_to_tlgrtf directly.... + ## Test that an error is issued when validate=TRUE (default behavior) expect_error(tt_to_tbldf(badtbl)) + expect_error(tt_to_tbldf(badtbl, validate = TRUE)) + + ## Test that a message is issued when validate=FALSE + expect_message( + tt_to_tbldf(badtbl, validate = FALSE), + "Invalid table structure detected" + ) + + ## Test that a different message is issued for valid tables when validate=FALSE + expect_message( + tt_to_tbldf(tbl, validate = FALSE), + "Table structure validation succeeded" + ) lyt_pgby <- basic_table() |> split_cols_by("ARM") |> @@ -206,25 +218,25 @@ test_that("tt_to_tlgrtf validates table structure correctly", { badtbl <- build_table(badlyt, ex_adsl) - # Test that a warning is issued when validate=TRUE - expect_warning( + # Test that a message is issued when validate=TRUE + expect_message( tt_to_tlgrtf(badtbl, file = NULL, validate = TRUE), "Invalid table structure detected" ) - # Test that no warning is issued when validate=FALSE - expect_no_warning( + # Test that no message is issued when validate=FALSE + expect_no_message( tt_to_tlgrtf(badtbl, file = NULL, validate = FALSE) ) - # Test that the default behavior (validate=TRUE) issues a warning 
- expect_warning( + # Test that the default behavior (validate=TRUE) issues a message + expect_message( tt_to_tlgrtf(badtbl, file = NULL), "Invalid table structure detected" ) }) -test_that("tt_to_tlgrtf does not warn for valid table structures", { +test_that("tt_to_tlgrtf does not message for valid table structures with validate=TRUE", { # Create a valid table structure data(ex_adsl) lyt <- basic_table() |> @@ -233,38 +245,31 @@ test_that("tt_to_tlgrtf does not warn for valid table structures", { tbl <- build_table(lyt, ex_adsl) - # Test that no warning is issued for a valid table - expect_no_warning( + # Test that no message is issued for a valid table with validate=TRUE + expect_no_message( tt_to_tlgrtf(tbl, file = NULL, validate = TRUE) ) }) -test_that("tt_to_tlgrtf respects JUNCO_DISABLE_VALIDATION environment variable", { - # Create an invalid table structure +test_that("tt_to_tlgrtf validates valid table structures correctly", { + # Create a valid table structure data(ex_adsl) - badlyt <- basic_table() |> - split_rows_by("ARM") |> - summarize_row_groups() - - badtbl <- build_table(badlyt, ex_adsl) - - # Save current environment variable state - old_env <- Sys.getenv("JUNCO_DISABLE_VALIDATION") + lyt <- basic_table() |> + split_cols_by("ARM") |> + analyze("AGE") - # Set environment variable to disable validation - Sys.setenv(JUNCO_DISABLE_VALIDATION = "TRUE") + tbl <- build_table(lyt, ex_adsl) - # Test that no warning is issued when environment variable is set - expect_no_warning( - tt_to_tlgrtf(badtbl, file = NULL, validate = TRUE) + # Test that a message is issued for valid tables when validate=FALSE + expect_message( + tt_to_tlgrtf(tbl, file = NULL, validate = FALSE), + "Table structure validation succeeded" ) - # Reset environment variable - if (old_env == "") { - Sys.unsetenv("JUNCO_DISABLE_VALIDATION") - } else { - Sys.setenv(JUNCO_DISABLE_VALIDATION = old_env) - } + # Test that no message is issued for valid tables when validate=TRUE + 
expect_no_message( + tt_to_tlgrtf(tbl, file = NULL, validate = TRUE) + ) }) test_that("more top left than col headers works", { diff --git a/tests/testthat/test-varia.R b/tests/testthat/test-varia.R index 07e23608..48de980a 100644 --- a/tests/testthat/test-varia.R +++ b/tests/testthat/test-varia.R @@ -12,7 +12,7 @@ testthat::test_that("a_freq_j works (old count_unq case)", { top_level_section_div = " ", show_colcounts = TRUE, colcount_format = "N=xx" - ) %>% + ) |> analyze( "TRTEMFL", afun = a_freq_j, @@ -36,7 +36,7 @@ testthat::test_that("a_freq_j works (old count_unq case)", { top_level_section_div = " ", show_colcounts = TRUE, colcount_format = "N=xx" - ) %>% + ) |> analyze( "TRTEMFL", afun = a_freq_j, @@ -58,7 +58,7 @@ testthat::test_that("a_freq_j works (old count_unq case)", { testthat::test_that("a_freq_subcol_j works (old case of cpct_subcol)", { adsl <- ex_adsl - adae <- ex_adae %>% dplyr::select(USUBJID, AEBODSYS, AEDECOD, AREL) + adae <- ex_adae |> dplyr::select(USUBJID, AEBODSYS, AEDECOD, AREL) adae$TRTEMFL <- "Y" adsl$COLSPAN_REL <- "AEs" @@ -87,12 +87,12 @@ testthat::test_that("a_freq_subcol_j works (old case of cpct_subcol)", { top_level_section_div = " ", show_colcounts = TRUE, colcount_format = "N=xx" - ) %>% + ) |> split_cols_by( "COLSPAN_REL", split_fun = add_combo_levels(combodf, trim = TRUE) - ) %>% - split_cols_by("ARM") %>% + ) |> + split_cols_by("ARM") |> analyze( "TRTEMFL", afun = a_freq_subcol_j, @@ -125,7 +125,7 @@ testthat::test_that("a_freq_subcol_j works (old case of cpct_subcol)", { }) testthat::test_that("a_freq_combos_j (old cpct_filter_combos case) works", { - adsl <- ex_adsl %>% + adsl <- ex_adsl |> mutate( months = (EOSDY + 30) / 30.4375, ACAT1 = case_when( @@ -139,16 +139,16 @@ testthat::test_that("a_freq_combos_j (old cpct_filter_combos case) works", { ACAT1, levels = c("Within 3 months", "4 to 12 months", "Beyond 13 months") ) - ) %>% + ) |> select(USUBJID, ARM, EOSDY, ACAT1, months) - adae <- ex_adae %>% 
dplyr::select(USUBJID, AEBODSYS, AEDECOD, ASTDY, ARM) + adae <- ex_adae |> dplyr::select(USUBJID, AEBODSYS, AEDECOD, ASTDY, ARM) adae$TRTEMFL <- "Y" adae$TRTEMFL <- factor(adae$TRTEMFL) - adae <- adae %>% + adae <- adae |> # ACAT1 derivation - mutate(months = (ASTDY + 30) / 30.4375) %>% + mutate(months = (ASTDY + 30) / 30.4375) |> mutate( ACAT1 = case_when( months <= 3 ~ "Within 3 months", @@ -156,7 +156,7 @@ testthat::test_that("a_freq_combos_j (old cpct_filter_combos case) works", { months > 12 ~ "Beyond 13 months", .default = NA_character_ ) - ) %>% + ) |> mutate( ACAT1 = factor( ACAT1, @@ -167,11 +167,11 @@ testthat::test_that("a_freq_combos_j (old cpct_filter_combos case) works", { NA ) ) - ) %>% - select(-months) %>% + ) |> + select(-months) |> # first occurrence derivation - arrange(USUBJID, ASTDY) %>% - group_by(USUBJID) %>% + arrange(USUBJID, ASTDY) |> + group_by(USUBJID) |> mutate(AOCCFL = case_when(row_number() == 1 ~ "Y")) # This df generates facets for column space : levels from adsl ACAT1 need to be cumulative @@ -211,8 +211,8 @@ testthat::test_that("a_freq_combos_j (old cpct_filter_combos case) works", { "Subjects with >= 1 AE" ) - lyt <- basic_table(top_level_section_div = " ", show_colcounts = TRUE) %>% - split_cols_by("ARM") %>% + lyt <- basic_table(top_level_section_div = " ", show_colcounts = TRUE) |> + split_cols_by("ARM") |> split_cols_by( "ACAT1", split_fun = add_combo_levels( @@ -220,7 +220,7 @@ testthat::test_that("a_freq_combos_j (old cpct_filter_combos case) works", { trim = FALSE, keep_levels = combodf$valname ) - ) %>% + ) |> analyze( "TRTEMFL", nested = FALSE, @@ -247,20 +247,20 @@ testthat::test_that("a_freq_combos_j (old cpct_filter_combos case) works", { n_denom <- nrow( unique( - adsl %>% + adsl |> filter( ARM == "A: Drug X" & ACAT1 %in% c("4 to 12 months", "Beyond 13 months") - ) %>% + ) |> select(USUBJID) ) ) n_val <- nrow(unique( - adae %>% + adae |> filter( ARM == "A: Drug X" & ACAT1 %in% c("4 to 12 months") & AOCCFL == "Y" - ) 
%>% + ) |> select(USUBJID) )) @@ -283,7 +283,7 @@ testthat::test_that("`a_freq_j()` works", { adsl$rrisk_header <- "Risk Difference (%) (95% CI)" adsl$rrisk_label <- paste(adsl[["ARM"]], "vs Placebo") - advs <- ex_advs %>% select(USUBJID, PARAMCD, PARAM, AVISIT, ANRIND) + advs <- ex_advs |> select(USUBJID, PARAMCD, PARAM, AVISIT, ANRIND) advs <- dplyr::inner_join(advs, adsl, by = c("USUBJID")) advs <- advs[advs$AVISIT %in% c("BASELINE", "WEEK 1 DAY 8"), ] @@ -291,9 +291,9 @@ testthat::test_that("`a_freq_j()` works", { advs$AVISIT <- factor(as.character(advs$AVISIT)) advs$PARAM <- factor(as.character(advs$PARAM)) - advs <- advs %>% - group_by(ARM, PARAMCD, AVISIT) %>% - arrange(USUBJID) %>% + advs <- advs |> + group_by(ARM, PARAMCD, AVISIT) |> + arrange(USUBJID) |> mutate(id = row_number()) # set ANRIND to missing for first 10 subjects from each arm @@ -311,17 +311,17 @@ testthat::test_that("`a_freq_j()` works", { lyt <- basic_table( show_colcounts = TRUE, colcount_format = "N=xx" - ) %>% - split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("rrisk_header", nested = FALSE) %>% + ) |> + split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("rrisk_header", nested = FALSE) |> split_cols_by( "ARM", labels_var = "rrisk_label", split_fun = remove_split_levels("B: Placebo") - ) %>% - split_rows_by("PARAM", label_pos = "topleft", section_div = " ") %>% - split_rows_by("AVISIT") %>% + ) |> + split_rows_by("PARAM", label_pos = "topleft", section_div = " ") |> + split_rows_by("AVISIT") |> analyze( "ANRIND", afun = a_freq_j, @@ -340,19 +340,19 @@ testthat::test_that("`a_freq_j()` works", { n_expected <- as.double( table( - advs %>% - ungroup() %>% + advs |> + ungroup() |> filter( PARAM == "Diastolic Blood Pressure" & AVISIT == "WEEK 1 DAY 8" & !is.na(ANRIND) - ) %>% + ) |> mutate( ARM = factor( as.character(ARM), levels = c("A: Drug X", "C: Combination", 
"B: Placebo") ) - ) %>% + ) |> select(ARM) ) ) @@ -384,10 +384,10 @@ testthat::test_that("a_freq_j works (old count_subject case)", { ) # scenario 1: cfun - subgroup coming from adsl, not df - lyt1 <- basic_table(show_colcounts = TRUE) %>% - split_cols_by("ARM") %>% - split_rows_by("SEX") %>% - summarize_row_groups("SEX", cfun = a_freq_j, extra_args = extra_args_1) %>% + lyt1 <- basic_table(show_colcounts = TRUE) |> + split_cols_by("ARM") |> + split_rows_by("SEX") |> + summarize_row_groups("SEX", cfun = a_freq_j, extra_args = extra_args_1) |> analyze( "TRTEMFL", afun = simple_afun, @@ -405,10 +405,10 @@ testthat::test_that("a_freq_j works (old count_subject case)", { ## 2 cell value : content + n n_1 <- nrow(unique( - adsl %>% filter(ARM == "B: Placebo" & SEX == "F") %>% select(USUBJID) + adsl |> filter(ARM == "B: Placebo" & SEX == "F") |> select(USUBJID) )) n_2 <- nrow(unique( - adae %>% filter(ARM == "B: Placebo" & SEX == "F") %>% select(USUBJID) + adae |> filter(ARM == "B: Placebo" & SEX == "F") |> select(USUBJID) )) expected1 <- c(n_1, n_2) @@ -416,28 +416,27 @@ testthat::test_that("a_freq_j works (old count_subject case)", { testthat::expect_identical(result1, expected1) # scenario 2: shift table for lab/vs - advs <- ex_advs %>% - select(USUBJID, PARAMCD, PARAM, ANRIND, AVAL, BASE, ABLFL, AVISIT) %>% + advs <- ex_advs |> + select(USUBJID, PARAMCD, PARAM, ANRIND, AVAL, BASE, ABLFL, AVISIT) |> filter(PARAMCD == "DIABP" & (AVISIT == "WEEK 1 DAY 8" | ABLFL == "Y")) ### remove subjects from advs, so that N is not same as from adsl advs <- advs[!(advs$USUBJID %in% adsl$USUBJID[1:10]), ] - BNRIND <- advs %>% - filter(ABLFL == "Y") %>% - mutate(BNRIND = ANRIND) %>% + BNRIND <- advs |> + filter(ABLFL == "Y") |> + mutate(BNRIND = ANRIND) |> select(USUBJID, PARAMCD, BNRIND) - advs <- advs %>% - left_join(., BNRIND, by = join_by(USUBJID, PARAMCD)) + advs <- left_join(advs, BNRIND, by = join_by(USUBJID, PARAMCD)) - adsl <- adsl %>% - mutate(BNRIND = "N") %>% + adsl <- adsl 
|> + mutate(BNRIND = "N") |> mutate(BNRIND = factor(BNRIND, levels = c("N", levels(advs$ANRIND)))) adsl$BNRIND_header <- " " adsl$BNRIND_header2 <- "Baseline NRIND" - advs <- advs %>% + advs <- advs |> mutate( BNRIND = factor( as.character(BNRIND), @@ -445,29 +444,33 @@ testthat::test_that("a_freq_j works (old count_subject case)", { ) ) - advs <- advs %>% - left_join( - ., - adsl %>% select(USUBJID, ARM, BNRIND_header, BNRIND_header2), - by = "USUBJID" - ) %>% + # Create a temporary variable to store the result of the join + advs_joined <- left_join( + advs, + adsl |> select(USUBJID, ARM, BNRIND_header, BNRIND_header2), + by = "USUBJID", + relationship = "many-to-many" + ) + + # Now filter using the temporary variable + advs <- advs_joined |> filter(ABLFL != "Y") ANRIND_levels <- levels(advs$ANRIND) - lyt <- basic_table(show_colcounts = FALSE) %>% + lyt <- basic_table(show_colcounts = FALSE) |> ## to ensure N column is not under the Baseline column span header - split_cols_by("BNRIND_header") %>% - split_cols_by("BNRIND", split_fun = keep_split_levels("N")) %>% - split_cols_by("BNRIND_header2", nested = FALSE) %>% + split_cols_by("BNRIND_header") |> + split_cols_by("BNRIND", split_fun = keep_split_levels("N")) |> + split_cols_by("BNRIND_header2", nested = FALSE) |> split_cols_by( "BNRIND", split_fun = make_split_fun( pre = list(rm_levels(excl = "N")), post = list(add_overall_facet("TOTAL", "Total")) ) - ) %>% - split_rows_by("ARM", child_labels = "hidden") %>% + ) |> + split_rows_by("ARM", child_labels = "hidden") |> # these counts will be checked as result1/expected1 summarize_row_groups( var = "ARM", @@ -479,14 +482,14 @@ testthat::test_that("a_freq_j works (old count_subject case)", { restr_columns = "N", extrablanklineafter = "C: Combination" ) - ) %>% - split_rows_by("PARAM", nested = FALSE, split_fun = drop_split_levels) %>% + ) |> + split_rows_by("PARAM", nested = FALSE, split_fun = drop_split_levels) |> split_rows_by( "ARM", label_pos = "hidden", 
split_label = "Treatment Group", section_div = " " - ) %>% + ) |> # these counts will be checked as result2/expected2 summarize_row_groups( "ARM", @@ -496,7 +499,7 @@ testthat::test_that("a_freq_j works (old count_subject case)", { .stats = "denom", restr_columns = "N" ) - ) %>% + ) |> # these counts will be checked as result3/expected3 analyze( "ANRIND", @@ -541,10 +544,10 @@ testthat::test_that("a_freq_j works (old count_subject case)", { ) ))) expected2 <- unname(unlist(as.list(table( - advs %>% + advs |> filter( PARAMCD == "DIABP" & AVISIT == "WEEK 1 DAY 8" & ARM == "C: Combination" - ) %>% + ) |> select(ARM) )))["C: Combination"]) @@ -564,10 +567,10 @@ testthat::test_that("a_freq_j works (old count_subject case)", { ) )) expected3 <- table( - advs %>% + advs |> filter( PARAMCD == "DIABP" & AVISIT == "WEEK 1 DAY 8" & ARM == "B: Placebo" - ) %>% + ) |> select(BNRIND, ANRIND) )["NORMAL", "HIGH"] @@ -582,8 +585,8 @@ testthat::test_that("`a_freq_j()` works", { adsl <- ex_adsl adae <- ex_adae - lyt <- basic_table(show_colcounts = TRUE) %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE) |> + split_cols_by("ARM") |> analyze( vars = "STUDYID", afun = a_freq_j, @@ -614,8 +617,8 @@ testthat::test_that("a_freq_j works (old a_countpat_newlevels case)", { new_BMRKR2_levels <- list(c("Medium - High"), list(c("MEDIUM", "HIGH"))) - lyt <- basic_table(show_colcounts = TRUE) %>% - split_cols_by("ARM") %>% + lyt <- basic_table(show_colcounts = TRUE) |> + split_cols_by("ARM") |> ### add extra combined level for BMRKR2 analyze( vars = "BMRKR2", @@ -644,8 +647,8 @@ testthat::test_that("a_freq_j works (old a_countpat_newlevels case)", { testthat::expect_identical(result_label, expected_label) expected_value <- table( - adsl %>% - filter(ARM == "A: Drug X") %>% + adsl |> + filter(ARM == "A: Drug X") |> select(BMRKR2) ) @@ -694,15 +697,15 @@ testthat::test_that("a_summarize_ex_j works", { top_level_section_div = " ", show_colcounts = TRUE, colcount_format = 
"N=xx" - ) %>% - split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) %>% - split_cols_by("ARM") %>% - split_cols_by("diff_header", nested = FALSE) %>% + ) |> + split_cols_by("colspan_trt", split_fun = trim_levels_in_group("ARM")) |> + split_cols_by("ARM") |> + split_cols_by("diff_header", nested = FALSE) |> split_cols_by( "ARM", split_fun = remove_split_levels("B: Placebo"), labels_var = "diff_label" - ) %>% + ) |> analyze( "EOSDY", afun = a_summarize_ex_j, @@ -720,7 +723,7 @@ testthat::test_that("a_summarize_ex_j works", { tbl <- build_table(lyt, adsl) - rps <- make_row_df(tbl) %>% + rps <- make_row_df(tbl) |> filter(node_class == "DataRow") result_1 <- unname(cell_values(tbl[, c( @@ -735,7 +738,7 @@ testthat::test_that("a_summarize_ex_j works", { })) names(result_1[[4]]) <- NULL - xx_1 <- adsl %>% filter(ARM == "C: Combination" & !is.na(EOSDY)) + xx_1 <- adsl |> filter(ARM == "C: Combination" & !is.na(EOSDY)) xx_1. <- xx_1[["EOSDY"]] ## quantiles rather than IQR, label misleading @@ -774,7 +777,7 @@ testthat::test_that("a_summarize_ex_j works", { })) names(result_2[[4]]) <- NULL - xx_2 <- adsl %>% filter(ARM == "B: Placebo" & !is.na(EOSDY)) + xx_2 <- adsl |> filter(ARM == "B: Placebo" & !is.na(EOSDY)) xx_2. 
<- xx_2[["EOSDY"]] ## quantiles rather than IQR, label misleading @@ -807,7 +810,7 @@ testthat::test_that("a_summarize_ex_j works", { attr(result_3, "label") <- NULL df <- xx_1 - .df_row <- adsl %>% filter(!is.na(EOSDY)) + .df_row <- adsl |> filter(!is.na(EOSDY)) .ref_group <- xx_2 .in_ref_col <- FALSE From 767a05c3896b54a5f6b8de982395b579899ad9ed Mon Sep 17 00:00:00 2001 From: iaugusty <iaugusty@its.jnj.com> Date: Thu, 11 Dec 2025 14:24:55 +0000 Subject: [PATCH 38/49] update to jjcsformat_range_fct per suggestion Joe --- R/jjcsformats.R | 13 +++++++++---- man/jjcsformat_xx.Rd | 8 ++++++-- tests/testthat/_snaps/jjcsformats.md | 4 ++++ tests/testthat/test-jjcsformats.R | 2 ++ 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 7909258b..9430aee2 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -321,13 +321,15 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { #' #' @param str (`string`)\cr the format specifying the number of digits to be used, #' for the range values, e.g. `"xx.xx"`. +#' +#' @param censor_char (`string`)\cr the character (of length 1) to be appended to `min` or `max` #' @return A function that formats a numeric vector with 4 elements: #' - minimum #' - maximum #' - censored minimum? (1 if censored, 0 if event) #' - censored maximum? (1 if censored, 0 if event) #' The range along with the censoring information is returned as a string -#' with the specified numeric format as `(min, max)`, and the `+` is appended +#' with the specified numeric format as `(min, max)`, and the `censor_char` is appended #' to `min` or `max` if these have been censored. 
#' #' @export @@ -338,8 +340,11 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { #' my_range_format(c(0.35235, 99.2342, 0, 1)) #' my_range_format(c(0.35235, 99.2342, 0, 0)) #' my_range_format(c(0.35235, 99.2342, 1, 1)) -jjcsformat_range_fct <- function(str) { +#' my_range_format <- jjcsformat_range_fct("xx.xx", censor_char = "*") +#' my_range_format(c(0.35235, 99.2342, 1, 1)) +jjcsformat_range_fct <- function(str, censor_char = "+") { format_xx <- jjcsformat_xx(str) + checkmate::assert_string(censor_char, na.ok = FALSE, n.chars = 1) function(x, output, round_type = valid_round_type, ...) { round_type <- match.arg(round_type) @@ -354,8 +359,8 @@ jjcsformat_range_fct <- function(str) { res <- vapply(x[c(1, 2)], FUN = function(x) { format_value(x, format_xx, round_type = round_type) }, character(1)) - if (x[3] == 1) res[1] <- paste0(res[1], "+") - if (x[4] == 1) res[2] <- paste0(res[2], "+") + if (x[3] == 1) res[1] <- paste0(res[1], censor_char) + if (x[4] == 1) res[2] <- paste0(res[2], censor_char) paste0("(", res[1], ", ", res[2], ")") } } diff --git a/man/jjcsformat_xx.Rd b/man/jjcsformat_xx.Rd index ec0062fc..dc980cd6 100644 --- a/man/jjcsformat_xx.Rd +++ b/man/jjcsformat_xx.Rd @@ -15,7 +15,7 @@ jjcsformat_xx( jjcsformat_pval_fct(alpha = 0.05) -jjcsformat_range_fct(str) +jjcsformat_range_fct(str, censor_char = "+") } \arguments{ \item{str}{(\code{string})\cr the format specifying the number of digits to be used, @@ -31,6 +31,8 @@ the formatters framework be overridden by \code{na_str_default}? Defaults to base \code{formatters} framework.} \item{alpha}{(\code{numeric})\cr the significance level to account for during rounding.} + +\item{censor_char}{(\code{string})\cr the character (of length 1) to be appended to \code{min} or \code{max}} } \value{ Either a supported format string, or a formatting function that can be @@ -51,7 +53,7 @@ A function that formats a numeric vector with 4 elements: \item censored minimum? 
(1 if censored, 0 if event) \item censored maximum? (1 if censored, 0 if event) The range along with the censoring information is returned as a string -with the specified numeric format as \verb{(min, max)}, and the \code{+} is appended +with the specified numeric format as \verb{(min, max)}, and the \code{censor_char} is appended to \code{min} or \code{max} if these have been censored. } } @@ -98,6 +100,8 @@ my_range_format(c(0.35235, 99.2342, 1, 0)) my_range_format(c(0.35235, 99.2342, 0, 1)) my_range_format(c(0.35235, 99.2342, 0, 0)) my_range_format(c(0.35235, 99.2342, 1, 1)) +my_range_format <- jjcsformat_range_fct("xx.xx", censor_char = "*") +my_range_format(c(0.35235, 99.2342, 1, 1)) } \seealso{ Other JJCS formatting functions: diff --git a/tests/testthat/_snaps/jjcsformats.md b/tests/testthat/_snaps/jjcsformats.md index 72f3200b..391fa205 100644 --- a/tests/testthat/_snaps/jjcsformats.md +++ b/tests/testthat/_snaps/jjcsformats.md @@ -199,6 +199,10 @@ my_range_format(c(0.35235, 99.2342, 1, 1)) Output [1] "(0.35+, 99.23+)" + Code + my_range_format2(c(0.35235, 99.2342, 0, 1)) + Output + [1] "(0.35, 99.23*)" # jjcsformat_pval_fct works diff --git a/tests/testthat/test-jjcsformats.R b/tests/testthat/test-jjcsformats.R index f86636d7..2095ae46 100644 --- a/tests/testthat/test-jjcsformats.R +++ b/tests/testthat/test-jjcsformats.R @@ -154,11 +154,13 @@ test_that("round_type support works", { test_that("jjcsformat_range_fct is formatting ranges as expected", { my_range_format <- jjcsformat_range_fct("xx.xx") + my_range_format2 <- jjcsformat_range_fct("xx.xx", censor_char = "*") expect_snapshot({ my_range_format(c(0.35235, 99.2342, 1, 0)) my_range_format(c(0.35235, 99.2342, 0, 1)) my_range_format(c(0.35235, 99.2342, 0, 0)) my_range_format(c(0.35235, 99.2342, 1, 1)) + my_range_format2(c(0.35235, 99.2342, 0, 1)) }) }) From 621cd5ad01663ab7c02af7b7d2d0a8e499ae2120 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: 
Thu, 11 Dec 2025 14:51:34 +0000 Subject: [PATCH 39/49] [skip style] [skip vbump] Restyle files --- R/jjcsformats.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 9430aee2..4f1c06e7 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -321,7 +321,7 @@ jjcsformat_pval_fct <- function(alpha = 0.05) { #' #' @param str (`string`)\cr the format specifying the number of digits to be used, #' for the range values, e.g. `"xx.xx"`. -#' +#' #' @param censor_char (`string`)\cr the character (of length 1) to be appended to `min` or `max` #' @return A function that formats a numeric vector with 4 elements: #' - minimum From 6d012fa70d9dbcf1060f59c29579c6fd9d919264 Mon Sep 17 00:00:00 2001 From: iaugusty <iaugusty@its.jnj.com> Date: Thu, 11 Dec 2025 14:54:56 +0000 Subject: [PATCH 40/49] add censor_char to function --- .../testthat/_snaps/junco_utils_default_stats_formats_labels.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md index 27c0a0b0..882ed6fe 100644 --- a/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md +++ b/tests/testthat/_snaps/junco_utils_default_stats_formats_labels.md @@ -18,7 +18,7 @@ Code normalize_fun(res$range_with_cens_info) Output - [1] "{round_type<-match.arg(round_type)checkmate::assert_numeric(x,len=4L,finite=TRUE,any.missing=FALSE)checkmate::assert_true(all(x[c(3,4)]%in%c(0,1)))res<-vapply(x[c(1,2)],FUN=function(x){format_value(x,format_xx,round_type=round_type)},character(1))if(x[3]==1)res[1]<-paste0(res[1],\"+\")if(x[4]==1)res[2]<-paste0(res[2],\"+\")paste0(\"(\",res[1],\",\",res[2],\")\")}" + [1] 
"{round_type<-match.arg(round_type)checkmate::assert_numeric(x,len=4L,finite=TRUE,any.missing=FALSE)checkmate::assert_true(all(x[c(3,4)]%in%c(0,1)))res<-vapply(x[c(1,2)],FUN=function(x){format_value(x,format_xx,round_type=round_type)},character(1))if(x[3]==1)res[1]<-paste0(res[1],censor_char)if(x[4]==1)res[2]<-paste0(res[2],censor_char)paste0(\"(\",res[1],\",\",res[2],\")\")}" # get_labels_from_stats works as expected From d78ddbfc0e593ac0e56ad195fed6f0091df39784 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?David=20Mu=C3=B1oz=20Tord?= <david.munoz@mailbox.org> Date: Thu, 11 Dec 2025 16:14:27 +0100 Subject: [PATCH 41/49] Update R-CMD-check.yaml --- .github/workflows/R-CMD-check.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/R-CMD-check.yaml b/.github/workflows/R-CMD-check.yaml index a4ce8ae7..c59ef412 100644 --- a/.github/workflows/R-CMD-check.yaml +++ b/.github/workflows/R-CMD-check.yaml @@ -25,7 +25,7 @@ jobs: fail-fast: false matrix: config: - - {os: macos-latest, r: 'release'} +# - {os: macos-latest, r: 'release'} - {os: windows-latest, r: 'release'} - {os: ubuntu-latest, r: 'release'} From a854b548d8c33239cb22a3cc99da70fd9b41db97 Mon Sep 17 00:00:00 2001 From: iaugusty <iaugusty@its.jnj.com> Date: Fri, 12 Dec 2025 14:08:07 +0000 Subject: [PATCH 42/49] update to tt_to_tbldf and add tests --- R/tt_to_tblfile.R | 112 +++++++---- man/tt_to_tbldf.Rd | 4 + man/tt_to_tlgrtf.Rd | 6 +- .../_snaps/tt_to_tblfile/test4iec.rtf | 40 ++++ .../_snaps/tt_to_tblfile/test4iecmod.rtf | 40 ++++ .../_snaps/tt_to_tblfile/test4sas.rtf | 40 ++++ tests/testthat/test-tt_to_tblfile.R | 174 +++++++++++++++++- 7 files changed, 370 insertions(+), 46 deletions(-) create mode 100755 tests/testthat/_snaps/tt_to_tblfile/test4iec.rtf create mode 100755 tests/testthat/_snaps/tt_to_tblfile/test4iecmod.rtf create mode 100755 tests/testthat/_snaps/tt_to_tblfile/test4sas.rtf diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index 8b43e07d..e893ae40 100644 --- 
a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -4,6 +4,8 @@ #' @param fontspec (`font_spec`)\cr Font specification object #' @param string_map (`list`)\cr Unicode mapping for special characters #' @param markup_df (`data.frame`)\cr Data frame containing markup information +#' @param round_type (`character(1)`)\cr the type of rounding to perform. +#' See [formatters::format_value()] for more details. #' @param validate logical(1). Whether to validate the table structure using #' `rtables::validate_table_struct()`. Defaults to `TRUE`. If `FALSE`, a message #' will be displayed instead of stopping with an error when validation fails. @@ -14,8 +16,8 @@ tt_to_tbldf <- function( fontspec = font_spec("Times", 9L, 1), string_map = default_str_map, markup_df = dps_markup_df, - validate = TRUE -) { + round_type = obj_round_type(tt), + validate = TRUE) { if (validate) { if (!validate_table_struct(tt)) { stop( @@ -39,7 +41,8 @@ tt_to_tbldf <- function( tt, indent_rownames = FALSE, expand_newlines = FALSE, - fontspec = fontspec + fontspec = fontspec, + round_type = round_type ) strmat <- mf_strings(mpf) @@ -232,6 +235,33 @@ get_ncol <- function(tt) { } } +# apply format and round_type to columns of a listing_df object and return as a dataframe +# only for usage as input to gentlg +listingdf_dataframe_formats <- function(df, round_type = obj_round_type(df)) { + if (!is(df, "listing_df")) { + return(df) + } else { + cols <- listing_dispcols(df) + df[cols] <- lapply(names(df), function(col) { + fmt <- obj_format(df[[col]]) + if (!is.null(fmt)) { + na_str <- obj_na_str(df[[col]]) + lbl <- obj_label(df[[col]]) + + df[[col]] <- sapply(df[[col]], FUN = function(x) { + format_value(x, format = fmt, na_str = na_str, round_type = round_type) + }) + + obj_label(df[[col]]) <- lbl + } + df[[col]] + }) + } + + class(df) <- "data.frame" + df +} + #' @name tt_to_tlgrtf #' @title TableTree to .rtf Conversion #' @description @@ -268,10 +298,8 @@ get_ncol <- function(tt) { #' and k is the number 
of lines the header takes up. See [tidytlg::add_bottom_borders] #' for what the matrix should contain. Users should only specify this when the #' default behavior does not meet their needs. -#' @param round_type (`"iec"` or `"sas"`)\cr the type of rounding to perform. iec, -#' the default, performs rounding compliant with IEC 60559, while -#' sas performs nearest-value rounding consistent with rounding within SAS. -#' See `[formatters::format_value()]` for more details. +#' @param round_type (`character(1)`)\cr the type of rounding to perform. +#' See [formatters::format_value()] for more details. #' @param validate logical(1). Whether to validate the table structure using #' `rtables::validate_table_struct()`. Defaults to `TRUE`. If `FALSE`, a message #' will be displayed when validation fails. @@ -290,38 +318,38 @@ get_ncol <- function(tt) { #' @return If `file` is non-NULL, this is called for the side-effect of writing #' one or more RTF files. Otherwise, returns a list of `huxtable` objects. tt_to_tlgrtf <- function( - tt, - file = NULL, - orientation = c("portrait", "landscape"), - colwidths = def_colwidths( tt, - fontspec, - col_gap = col_gap, - label_width_ins = label_width_ins, - type = tlgtype - ), - label_width_ins = 2, - watermark = NULL, - pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), - fontspec = font_spec("Times", 9L, 1.2), - pg_width = pg_width_by_orient(orientation == "landscape"), - margins = c(0, 0, 0, 0), - paginate = tlg_type(tt) == "Table", - col_gap = ifelse(tlgtype == "Listing", .5, 3), - nosplitin = list( - row = character(), - col = character() - ), - verbose = FALSE, - tlgtype = tlg_type(tt), - string_map = default_str_map, - markup_df = dps_markup_df, - combined_rtf = FALSE, - one_table = TRUE, - border_mat = make_header_bordmat(obj = tt), - round_type = obj_round_type(tt), - validate = TRUE, - ... 
+ file = NULL, + orientation = c("portrait", "landscape"), + colwidths = def_colwidths( + tt, + fontspec, + col_gap = col_gap, + label_width_ins = label_width_ins, + type = tlgtype + ), + label_width_ins = 2, + watermark = NULL, + pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), + fontspec = font_spec("Times", 9L, 1.2), + pg_width = pg_width_by_orient(orientation == "landscape"), + margins = c(0, 0, 0, 0), + paginate = tlg_type(tt) == "Table", + col_gap = ifelse(tlgtype == "Listing", .5, 3), + nosplitin = list( + row = character(), + col = character() + ), + verbose = FALSE, + tlgtype = tlg_type(tt), + string_map = default_str_map, + markup_df = dps_markup_df, + combined_rtf = FALSE, + one_table = TRUE, + border_mat = make_header_bordmat(obj = tt), + round_type = obj_round_type(tt), + validate = TRUE, + ... ) { if (validate && tlgtype == "Table" && methods::is(tt, "VTableTree")) { if (!rtables::validate_table_struct(tt)) { @@ -520,7 +548,8 @@ tt_to_tlgrtf <- function( tt_to_tbldf, fontspec = fontspec, string_map = string_map, - markup_df = markup_df + markup_df = markup_df, + round_type = round_type ) if (one_table) { df <- do.call( @@ -544,11 +573,14 @@ tt_to_tlgrtf <- function( tt, fontspec = fontspec, string_map = string_map, - markup_df = markup_df + markup_df = markup_df, + round_type = round_type ) } } else { df <- tt[, listing_dispcols(tt)] + # apply formats and round_type and return df as a dataframe to input in gentlg + df <- listingdf_dataframe_formats(df, round_type = round_type) } ## we only care about the col labels here... 
diff --git a/man/tt_to_tbldf.Rd b/man/tt_to_tbldf.Rd index ef200676..a2434b02 100644 --- a/man/tt_to_tbldf.Rd +++ b/man/tt_to_tbldf.Rd @@ -9,6 +9,7 @@ tt_to_tbldf( fontspec = font_spec("Times", 9L, 1), string_map = default_str_map, markup_df = dps_markup_df, + round_type = obj_round_type(tt), validate = TRUE ) } @@ -21,6 +22,9 @@ tt_to_tbldf( \item{markup_df}{(\code{data.frame})\cr Data frame containing markup information} +\item{round_type}{(\code{character(1)})\cr the type of rounding to perform. +See \code{\link[formatters:format_value]{formatters::format_value()}} for more details.} + \item{validate}{logical(1). Whether to validate the table structure using \code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. If \code{FALSE}, a message will be displayed instead of stopping with an error when validation fails.} diff --git a/man/tt_to_tlgrtf.Rd b/man/tt_to_tlgrtf.Rd index e60fa8dd..87eb14e4 100644 --- a/man/tt_to_tlgrtf.Rd +++ b/man/tt_to_tlgrtf.Rd @@ -84,10 +84,8 @@ and k is the number of lines the header takes up. See \link[tidytlg:add_bottom_b for what the matrix should contain. Users should only specify this when the default behavior does not meet their needs.} -\item{round_type}{(\code{"iec"} or \code{"sas"})\cr the type of rounding to perform. iec, -the default, performs rounding compliant with IEC 60559, while -sas performs nearest-value rounding consistent with rounding within SAS. -See \verb{[formatters::format_value()]} for more details.} +\item{round_type}{(\code{character(1)})\cr the type of rounding to perform. +See \code{\link[formatters:format_value]{formatters::format_value()}} for more details.} \item{validate}{logical(1). Whether to validate the table structure using \code{rtables::validate_table_struct()}. Defaults to \code{TRUE}. 
If \code{FALSE}, a message diff --git a/tests/testthat/_snaps/tt_to_tblfile/test4iec.rtf b/tests/testthat/_snaps/tt_to_tblfile/test4iec.rtf new file mode 100755 index 00000000..c14ce897 --- /dev/null +++ b/tests/testthat/_snaps/tt_to_tblfile/test4iec.rtf @@ -0,0 +1,40 @@ +{\rtf1\ansi\deff0\portrait\paperw12240\paperh15840\margl1440\margr1440\margt1440\margb1440\headery1440\footery1440{\stylesheet{\ql \li0\ri0\widctlpar\wrapdefault\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs20\alang1025 \ltrch\fcs0 \fs20\lang9\langfe3081\loch\f0\hich\af0\dbch\af31505\cgrid\langnp9\langfenp3081 \snext0 \sqformat \spriority0 Normal;}{\s15\ql \fi-1152\li1152\ri0\keepn\widctlpar\tx1152\wrapdefault\faauto\rin0\lin1152\itap0 \rtlch\fcs1 \af0\afs18\alang1025 \ltrch\fcs0 \b\fs20\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext0 \sqformat caption;}{\s16 \ql \fi-1152\li1152\ri0\keepn\widctlpar\tx1152\wrapdefault\faauto\rin0\lin1152\itap0 \rtlch\fcs1 \af0\afs18\alang1025 \ltrch\fcs0 \b\fs20\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext0 \sqformat;}} + + +{ +\trowd +\trqc \clmgf\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx2872 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx4786 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx6701 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx9189 \pard\intbl\ql\fs20 \b \pnhang\trhdr\fi-1152\li1152\keepn\s15 test4iec:\tab \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\row +} + +{ +\trowd +\trqc \clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadr67\clpadfr3\cellx2872 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx4786 
+\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx6701 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx9189 \pard\intbl\ql\fs18 \keepn\trhdr \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM A \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM B \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM C \cell +\row +} + +{ +\trowd +\trqc \clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx2872 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx4786 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx6701 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx9189 \pard\intbl\ql\fs18 \intbl\li87\fi-87 Mean \cell +\pard\intbl\qc\fs18 1.86 \cell +\pard\intbl\qc\fs18 2.98 \cell +\pard\intbl\qc\fs18 -0.00 \cell +\row +} + +} \ No newline at end of file diff --git a/tests/testthat/_snaps/tt_to_tblfile/test4iecmod.rtf b/tests/testthat/_snaps/tt_to_tblfile/test4iecmod.rtf new file mode 100755 index 00000000..01c9e2a4 --- /dev/null +++ b/tests/testthat/_snaps/tt_to_tblfile/test4iecmod.rtf @@ -0,0 +1,40 @@ +{\rtf1\ansi\deff0\portrait\paperw12240\paperh15840\margl1440\margr1440\margt1440\margb1440\headery1440\footery1440{\stylesheet{\ql \li0\ri0\widctlpar\wrapdefault\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs20\alang1025 \ltrch\fcs0 \fs20\lang9\langfe3081\loch\f0\hich\af0\dbch\af31505\cgrid\langnp9\langfenp3081 \snext0 \sqformat \spriority0 Normal;}{\s15\ql \fi-1152\li1152\ri0\keepn\widctlpar\tx1152\wrapdefault\faauto\rin0\lin1152\itap0 \rtlch\fcs1 \af0\afs18\alang1025 \ltrch\fcs0 \b\fs20\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext0 \sqformat caption;}{\s16 \ql \fi-1152\li1152\ri0\keepn\widctlpar\tx1152\wrapdefault\faauto\rin0\lin1152\itap0 \rtlch\fcs1 \af0\afs18\alang1025 \ltrch\fcs0 
\b\fs20\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext0 \sqformat;}} + + +{ +\trowd +\trqc \clmgf\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx2872 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx4786 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx6701 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx9189 \pard\intbl\ql\fs20 \b \pnhang\trhdr\fi-1152\li1152\keepn\s15 test4iecmod:\tab \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\row +} + +{ +\trowd +\trqc \clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadr67\clpadfr3\cellx2872 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx4786 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx6701 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx9189 \pard\intbl\ql\fs18 \keepn\trhdr \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM A \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM B \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM C \cell +\row +} + +{ +\trowd +\trqc \clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx2872 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx4786 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx6701 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx9189 \pard\intbl\ql\fs18 \intbl\li87\fi-87 Mean \cell +\pard\intbl\qc\fs18 1.86 \cell +\pard\intbl\qc\fs18 2.98 \cell +\pard\intbl\qc\fs18 0.00 \cell +\row +} + +} \ No newline at end of file diff --git a/tests/testthat/_snaps/tt_to_tblfile/test4sas.rtf b/tests/testthat/_snaps/tt_to_tblfile/test4sas.rtf 
new file mode 100755 index 00000000..fde5d3a4 --- /dev/null +++ b/tests/testthat/_snaps/tt_to_tblfile/test4sas.rtf @@ -0,0 +1,40 @@ +{\rtf1\ansi\deff0\portrait\paperw12240\paperh15840\margl1440\margr1440\margt1440\margb1440\headery1440\footery1440{\stylesheet{\ql \li0\ri0\widctlpar\wrapdefault\faauto\adjustright\rin0\lin0\itap0 \rtlch\fcs1 \af0\afs20\alang1025 \ltrch\fcs0 \fs20\lang9\langfe3081\loch\f0\hich\af0\dbch\af31505\cgrid\langnp9\langfenp3081 \snext0 \sqformat \spriority0 Normal;}{\s15\ql \fi-1152\li1152\ri0\keepn\widctlpar\tx1152\wrapdefault\faauto\rin0\lin1152\itap0 \rtlch\fcs1 \af0\afs18\alang1025 \ltrch\fcs0 \b\fs20\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext0 \sqformat caption;}{\s16 \ql \fi-1152\li1152\ri0\keepn\widctlpar\tx1152\wrapdefault\faauto\rin0\lin1152\itap0 \rtlch\fcs1 \af0\afs18\alang1025 \ltrch\fcs0 \b\fs20\lang1033\langfe1033\loch\f0\hich\af0\dbch\af31505\cgrid\langnp1033\langfenp1033 \sbasedon0 \snext0 \sqformat;}} + + +{ +\trowd +\trqc \clmgf\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx2872 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx4786 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx6701 +\clmrg\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx9189 \pard\intbl\ql\fs20 \b \pnhang\trhdr\fi-1152\li1152\keepn\s15 test4sas:\tab \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\pard\intbl\ql\fs20 \b \b0 \cell +\row +} + +{ +\trowd +\trqc \clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadr67\clpadfr3\cellx2872 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx4786 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx6701 
+\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\clpadt67\clpadft3\clpadr67\clpadfr3\cellx9189 \pard\intbl\ql\fs18 \keepn\trhdr \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM A \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM B \cell +\pard\intbl\qc\fs18 \keepn\trhdr ARM C \cell +\row +} + +{ +\trowd +\trqc \clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx2872 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx4786 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx6701 +\clbrdrt\brdrs\brdrw18\clbrdrl\clbrdrb\brdrs\brdrw18\clbrdrr\clvertalb\cellx9189 \pard\intbl\ql\fs18 \intbl\li87\fi-87 Mean \cell +\pard\intbl\qc\fs18 1.87 \cell +\pard\intbl\qc\fs18 2.99 \cell +\pard\intbl\qc\fs18 0.00 \cell +\row +} + +} \ No newline at end of file diff --git a/tests/testthat/test-tt_to_tblfile.R b/tests/testthat/test-tt_to_tblfile.R index 4e15104b..b4fa8ea5 100644 --- a/tests/testthat/test-tt_to_tblfile.R +++ b/tests/testthat/test-tt_to_tblfile.R @@ -17,9 +17,21 @@ mk_part_names <- function(nfiles, fname) { } } -rtf_out_wrapper <- function(tt, filnm, ..., part = 1, combined = FALSE) { +rtf_out_wrapper <- function( + tt, + filnm, + ..., + part = 1, + combined = FALSE, + round_type = obj_round_type(tt)) { fullfl <- file.path(tempdir(), filnm) - res <- tt_to_tlgrtf(tt, file = fullfl, ..., combined_rtf = combined) + res <- tt_to_tlgrtf( + tt, + file = fullfl, + ..., + combined_rtf = combined, + round_type = round_type + ) nf <- length(res) if (combined) { paste0(fullfl, "allparts.rtf") @@ -33,6 +45,96 @@ rtf_out_wrapper <- function(tt, filnm, ..., part = 1, combined = FALSE) { } } +# all elements result in different rounding sas vs iec with format xx.xx +# third element only results in different rounding iec_mod vs iec with format xx.xx +vals_round_type <- c(1.865, 2.985, -0.001) + +vals_round_type_fmt <- function(vals = vals_round_type, round_type = "sas") { + mapply(format_value, x = 
vals, format = "xx.xx", round_type = round_type) +} + +tt_to_test_round_type <- function(vals = vals_round_type, round_type = "iec") { + require(dplyr, quietly = TRUE) + txtvals_iec <- vals_round_type_fmt(vals = vals, round_type = "iec") + txtvals_sas <- vals_round_type_fmt(vals = vals, round_type = "sas") + + # confirmation that at least one of the values result in different format presentation + expect_true(any(txtvals_iec != txtvals_sas)) + + adsl <- ex_adsl + + adsl <- adsl |> + mutate(new_var = case_when( + ARMCD == "ARM A" ~ vals[1], + ARMCD == "ARM B" ~ vals[2], + ARMCD == "ARM C" ~ vals[3] + )) + + lyt <- basic_table(show_colcounts = FALSE, round_type = round_type) |> + split_cols_by("ARMCD") |> + analyze(c("new_var"), function(x) { + in_rows( + mean = mean(x), + .formats = c("xx.xx"), + .labels = c("Mean") + ) + }) + + tbl <- build_table(lyt, adsl) +} + +listingdf_to_test_round_type <- function(vals = vals_round_type, round_type = "iec") { + require(dplyr, quietly = TRUE) + txtvals_iec <- vals_round_type_fmt(vals = vals, round_type = "iec") + txtvals_sas <- vals_round_type_fmt(vals = vals, round_type = "sas") + + # confirmation that at least one of the values result in different format presentation + expect_true(any(txtvals_iec != txtvals_sas)) + + lsting <- ex_adae |> + dplyr::select(USUBJID, AGE, SEX, RACE, ARM, BMRKR1) |> + dplyr::distinct() |> + dplyr::group_by(ARM) |> + dplyr::slice_head(n = 2) |> + dplyr::ungroup() + + lsting[1, "BMRKR1"] <- 1.865 + lsting[2, "BMRKR1"] <- 2.985 + lsting[3, "BMRKR1"] <- -0.001 + + lsting <- lsting |> + dplyr::mutate( + AGE = tern::explicit_na(as.character(AGE), ""), + SEX = tern::explicit_na(SEX, ""), + RACE = explicit_na(RACE, ""), + COL0 = explicit_na(.data[["ARM"]], ""), + COL1 = explicit_na(USUBJID, ""), + COL2 = paste(AGE, SEX, RACE, sep = " / "), + COL3 = BMRKR1 + ) |> + arrange(COL0, COL1) + + lsting <- formatters::var_relabel( + lsting, + COL0 = "Treatment Group", + COL1 = "Subject ID", + COL2 = paste("Age 
(years)", "Sex", "Race", sep = " / "), + COL3 = "Biomarker 1" + ) + + ls1 <- rlistings::as_listing( + df = lsting, + key_cols = c("COL0", "COL1"), + disp_cols = c("COL0", "COL1", "COL2", "COL3"), + col_formatting = list(COL3 = formatters::fmt_config(format = "xx.xx")) + ) + + list( + df = lsting, + lsting = ls1 + ) +} + test_that("tt_to_tlgrtf works with input Table and fontspec size 8", { lyt_wide <- basic_table() |> split_cols_by("ARM") |> @@ -296,3 +398,71 @@ test_that("more top left than col headers works", { expect_true(file.exists(paste0(tmpfile, ".rtf"))) unlink(tmpfile) }) + +test_that("round_type in tt_to_tbldf works as expected for tabletree object", { + tbl_iec <- tt_to_test_round_type(round_type = "iec") + tbldf <- tt_to_tbldf(tbl_iec) + tbldf_sas <- tt_to_tbldf(tbl_iec, round_type = "sas") + expect_true(any(tbldf != tbldf_sas)) + expect_true(all( + tbldf[, c("col 1", "col 2", "col 3")] == + vals_round_type_fmt( + vals = vals_round_type, + round_type = "iec" + ) + )) + expect_true(all( + tbldf_sas[, c("col 1", "col 2", "col 3")] == + vals_round_type_fmt( + vals = vals_round_type, + round_type = "sas" + ) + )) +}) + +test_that("round_type in tt_to_tlgrtf works as expected for tabletree object", { + tbl_iec <- tt_to_test_round_type(round_type = "iec") + expect_silent(suppressMessages(rtf_sas <- rtf_out_wrapper(tbl_iec, "test4sas", round_type = "sas"))) + expect_snapshot_file(rtf_sas, cran = TRUE) + expect_silent(suppressMessages(rtf_iec_mod <- rtf_out_wrapper(tbl_iec, "test4iecmod", round_type = "iec_mod"))) + expect_snapshot_file(rtf_iec_mod, cran = TRUE) + expect_silent(suppressMessages(rtf_iec <- rtf_out_wrapper(tbl_iec, "test4iec", round_type = "iec"))) + expect_snapshot_file(rtf_iec, cran = TRUE) + + # test actual values for sas rounding + res_nullfl <- expect_silent(tt_to_tlgrtf(tbl_iec, round_type = "sas", file = NULL)) + + vals_from_res_nullfl <- res_nullfl[[1]][[1]] + vals_from_res_nullfl <- unname(unlist(vals_from_res_nullfl[3, 2:4])) + + 
expect_true(all( + vals_from_res_nullfl == + vals_round_type_fmt( + vals = vals_round_type, + round_type = "sas" + ) + )) +}) + +test_that("round_type in tt_to_tlgrtf works as expected for listing object", { + listdf_iec <- listingdf_to_test_round_type() + list_iec <- listdf_iec$lsting + df <- listdf_iec$df + + res_nullfl_sas <- expect_silent(tt_to_tlgrtf(list_iec, round_type = "sas", file = NULL)) + res_nullfl_iec <- expect_silent(tt_to_tlgrtf(list_iec, round_type = "iec", file = NULL)) + vals_from_res_nullfl_sas <- res_nullfl_sas[[1]][[4]][-c(1:2)] + vals_from_res_nullfl_iec <- res_nullfl_iec[[1]][[4]][-c(1:2)] + + expect_identical( + vals_round_type_fmt(vals = df[["BMRKR1"]], round_type = "sas"), + vals_from_res_nullfl_sas + ) + + expect_identical( + vals_round_type_fmt(vals = df[["BMRKR1"]], round_type = "iec"), + vals_from_res_nullfl_iec + ) + + expect_true(any(vals_from_res_nullfl_iec != vals_from_res_nullfl_sas)) +}) From 5c0ecb96934a3fa6fad4acd73e9ca2e2770889d0 Mon Sep 17 00:00:00 2001 From: iaugusty <iaugusty@its.jnj.com> Date: Fri, 12 Dec 2025 14:27:07 +0000 Subject: [PATCH 43/49] in calc_one_visit only sas and iec rounding for now --- R/column_stats.R | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/R/column_stats.R b/R/column_stats.R index c7ae9e9d..a2dbb9a7 100644 --- a/R/column_stats.R +++ b/R/column_stats.R @@ -1,4 +1,4 @@ -calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = valid_round_type, exclude_visits, +calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = c("sas", "iec"), exclude_visits, var_names = c("AVAL", "CHG", "BASE")) { round_type <- match.arg(round_type) if (is.na(decimal)) { From 45a4b47c3ca25192084a389031c485ad818c3488 Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 12 Dec 2025 14:38:45 +0000 Subject: [PATCH 44/49] [skip style] [skip vbump] Restyle files --- R/tt_to_tblfile.R | 65 
+++++++++++++++-------------- tests/testthat/test-tt_to_tblfile.R | 13 +++--- 2 files changed, 40 insertions(+), 38 deletions(-) diff --git a/R/tt_to_tblfile.R b/R/tt_to_tblfile.R index e893ae40..4bdd90f2 100644 --- a/R/tt_to_tblfile.R +++ b/R/tt_to_tblfile.R @@ -17,7 +17,8 @@ tt_to_tbldf <- function( string_map = default_str_map, markup_df = dps_markup_df, round_type = obj_round_type(tt), - validate = TRUE) { + validate = TRUE +) { if (validate) { if (!validate_table_struct(tt)) { stop( @@ -318,38 +319,38 @@ listingdf_dataframe_formats <- function(df, round_type = obj_round_type(df)) { #' @return If `file` is non-NULL, this is called for the side-effect of writing #' one or more RTF files. Otherwise, returns a list of `huxtable` objects. tt_to_tlgrtf <- function( + tt, + file = NULL, + orientation = c("portrait", "landscape"), + colwidths = def_colwidths( tt, - file = NULL, - orientation = c("portrait", "landscape"), - colwidths = def_colwidths( - tt, - fontspec, - col_gap = col_gap, - label_width_ins = label_width_ins, - type = tlgtype - ), - label_width_ins = 2, - watermark = NULL, - pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), - fontspec = font_spec("Times", 9L, 1.2), - pg_width = pg_width_by_orient(orientation == "landscape"), - margins = c(0, 0, 0, 0), - paginate = tlg_type(tt) == "Table", - col_gap = ifelse(tlgtype == "Listing", .5, 3), - nosplitin = list( - row = character(), - col = character() - ), - verbose = FALSE, - tlgtype = tlg_type(tt), - string_map = default_str_map, - markup_df = dps_markup_df, - combined_rtf = FALSE, - one_table = TRUE, - border_mat = make_header_bordmat(obj = tt), - round_type = obj_round_type(tt), - validate = TRUE, - ... 
+ fontspec, + col_gap = col_gap, + label_width_ins = label_width_ins, + type = tlgtype + ), + label_width_ins = 2, + watermark = NULL, + pagenum = ifelse(tlgtype == "Listing", TRUE, FALSE), + fontspec = font_spec("Times", 9L, 1.2), + pg_width = pg_width_by_orient(orientation == "landscape"), + margins = c(0, 0, 0, 0), + paginate = tlg_type(tt) == "Table", + col_gap = ifelse(tlgtype == "Listing", .5, 3), + nosplitin = list( + row = character(), + col = character() + ), + verbose = FALSE, + tlgtype = tlg_type(tt), + string_map = default_str_map, + markup_df = dps_markup_df, + combined_rtf = FALSE, + one_table = TRUE, + border_mat = make_header_bordmat(obj = tt), + round_type = obj_round_type(tt), + validate = TRUE, + ... ) { if (validate && tlgtype == "Table" && methods::is(tt, "VTableTree")) { if (!rtables::validate_table_struct(tt)) { diff --git a/tests/testthat/test-tt_to_tblfile.R b/tests/testthat/test-tt_to_tblfile.R index b4fa8ea5..a1550ef3 100644 --- a/tests/testthat/test-tt_to_tblfile.R +++ b/tests/testthat/test-tt_to_tblfile.R @@ -18,12 +18,13 @@ mk_part_names <- function(nfiles, fname) { } rtf_out_wrapper <- function( - tt, - filnm, - ..., - part = 1, - combined = FALSE, - round_type = obj_round_type(tt)) { + tt, + filnm, + ..., + part = 1, + combined = FALSE, + round_type = obj_round_type(tt) +) { fullfl <- file.path(tempdir(), filnm) res <- tt_to_tlgrtf( tt, From c0a0c3547eaae0bbdd8cebcda23acabbd54e6cf3 Mon Sep 17 00:00:00 2001 From: iaugusty <iaugusty@its.jnj.com> Date: Fri, 12 Dec 2025 14:44:15 +0000 Subject: [PATCH 45/49] trigger checks From bd78027fdd98c703af8836a502f338469171cd27 Mon Sep 17 00:00:00 2001 From: munoztd0 <david.munoztord@mailbox.org> Date: Fri, 12 Dec 2025 15:47:20 +0100 Subject: [PATCH 46/49] fix: DESCRIPTION --- DESCRIPTION | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/DESCRIPTION b/DESCRIPTION index 38d14510..0bd95f09 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -36,19 +36,14 @@ Description: 
Structure and formatting requirements for clinical trial table and License: Apache License (>= 2) URL: https://github.com/johnsonandjohnson/junco, https://johnsonandjohnson.github.io/junco/ BugReports: https://github.com/johnsonandjohnson/junco/issues -Remotes: - insightsengineering/formatters@main, - insightsengineering/rtables@main, - insightsengineering/rlistings@main, - insightsengineering/tern@main -Depends: +Depends: R (>= 4.4), - formatters (>= 0.5.6), + formatters (>= 0.5.12), rtables (>= 0.6.13) Imports: tidytlg (>= 0.1.5), tern (>= 0.9.9), - rlistings (>= 0.2.11), + rlistings (>= 0.2.13), checkmate (>= 2.1.0), broom, methods, @@ -75,7 +70,6 @@ Suggests: mvtnorm, parallel, readxl, - pharmaverseadam, rlang, rbmi (>= 1.3.0), tidyr, @@ -87,7 +81,5 @@ Suggests: VignetteBuilder: knitr Config/testthat/edition: 3 Remotes: - insightsengineering/formatters@main, - insightsengineering/rtables@main, - insightsengineering/rlistings@main + insightsengineering/rtables@main Additional_repositories: https://insightsengineering.r-universe.dev/ From b45b9e239eeff0b7509ba976bc0bec1e7c8d5edc Mon Sep 17 00:00:00 2001 From: Gabe Becker <gabembecker@gmail.com> Date: Mon, 15 Dec 2025 16:45:53 -0800 Subject: [PATCH 47/49] Rework calc_one_visit, add 'support' for 'default' format to jjcsformats --- R/column_stats.R | 70 +++++++++--------------------- R/jjcsformats.R | 3 ++ tests/testthat/test-column_stats.R | 12 +++++ 3 files changed, 35 insertions(+), 50 deletions(-) diff --git a/R/column_stats.R b/R/column_stats.R index a2dbb9a7..706057da 100644 --- a/R/column_stats.R +++ b/R/column_stats.R @@ -1,5 +1,5 @@ -calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = c("sas", "iec"), exclude_visits, - var_names = c("AVAL", "CHG", "BASE")) { +calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = valid_round_type, exclude_visits, + var_names = c("AVAL", "CHG", "BASE")) { round_type <- match.arg(round_type) if (is.na(decimal)) { decimal <- 
0 @@ -7,54 +7,24 @@ calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = c if ((varnm == var_names[2] || varnm == var_names[3]) && (visit %in% exclude_visits)) { return(NULL) } - if (round_type == "sas") { - switch(statnm, - N = length(stats::na.omit(datvec)), - SE = format( - tidytlg::roundSAS(stats::sd(datvec) / sqrt(length(stats::na.omit(datvec))), decimal + 2), - nsmall = decimal + 2 - ), - SD = format( - tidytlg::roundSAS(stats::sd(datvec), decimal + 2), - nsmall = decimal + - 2 - ), - Mean = format(tidytlg::roundSAS(mean(datvec), decimal + 1), nsmall = decimal + 1), - mean_sd = paste0( - format(tidytlg::roundSAS(mean(datvec), decimal + 1), nsmall = decimal + 1), - " (", - format( - tidytlg::roundSAS(stats::sd(datvec), decimal + 2), - nsmall = decimal + - 2 - ), - ")" - ), - Med = format(tidytlg::roundSAS(stats::median(datvec), decimal + 1), nsmall = decimal + 1), - Min = format(tidytlg::roundSAS(min(datvec), decimal), nsmall = decimal), - Max = format(tidytlg::roundSAS(max(datvec), decimal), nsmall = decimal) - ) - } else { - switch(statnm, - N = length(stats::na.omit(datvec)), - SE = format(round(stats::sd(datvec) / sqrt(length(stats::na.omit(datvec))), decimal + 2), nsmall = decimal + 2), - SD = format(round(stats::sd(datvec), decimal + 2), nsmall = decimal + 2), - Mean = format(round(mean(datvec), decimal + 1), nsmall = decimal + 1), - mean_sd = paste0( - format(round(mean(datvec), decimal + 1), nsmall = decimal + 1), - " (", - format( - round(stats::sd(datvec), decimal + 2), - nsmall = decimal + - 2 - ), - ")" - ), - Med = format(round(stats::median(datvec), decimal + 1), nsmall = decimal + 1), - Min = format(round(min(datvec), decimal), nsmall = decimal), - Max = format(round(max(datvec), decimal), nsmall = decimal) - ) - } + + switch(statnm, + N = length(stats::na.omit(datvec)), + SE = round_fmt(stats::sd(datvec) / sqrt(length(stats::na.omit(datvec))), + decimal + 2, round_type = round_type), + SD = round_fmt(stats::sd(datvec), 
decimal + 2, round_type = round_type), + Mean = round_fmt(mean(datvec), decimal + 1, + round_type = round_type), + mean_sd = paste0( + round_fmt(mean(datvec), decimal + 1, round_type = round_type), + " (", + round_fmt(stats::sd(datvec), decimal + 2, round_type = round_type), + ")" + ), + Med = round_fmt(stats::median(datvec), decimal + 1, round_type = round_type), + Min = round_fmt(min(datvec), decimal, round_type = round_type), + Max = round_fmt(max(datvec), decimal, round_type = round_type), + ) } #' @name column_stats diff --git a/R/jjcsformats.R b/R/jjcsformats.R index 4f1c06e7..e1e35513 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -47,6 +47,9 @@ jjcsformat_xx <- function( stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. instead.") } + if (identical(str, "default")) + return(str) + if (is_valid_format(str)) { rtable_format <- str } else { diff --git a/tests/testthat/test-column_stats.R b/tests/testthat/test-column_stats.R index 896ee04c..7bd9912c 100644 --- a/tests/testthat/test-column_stats.R +++ b/tests/testthat/test-column_stats.R @@ -248,6 +248,18 @@ test_that("column_stats handles iec round_type correctly", { ) expect_equal(result_SAS, "25.35") + # Compare to SAS rounding + result_R_mod <- calc_one_visit( + df$AVAL[df$AVISIT == "Week 1"], + 1, + "Mean", + "Week 1", + "AVAL", + round_type = "iec_mod", + exclude_visits = "Baseline (DB)" + ) + expect_equal(result_R_mod, "25.34") + df <- data.frame( AVISIT = c( From 860424bcbbf98904c21a1340f5623a66344e387c Mon Sep 17 00:00:00 2001 From: github-actions <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 16 Dec 2025 00:48:34 +0000 Subject: [PATCH 48/49] [skip style] [skip vbump] Restyle files --- R/column_stats.R | 9 ++++++--- R/jjcsformats.R | 3 ++- tests/testthat/test-column_stats.R | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/R/column_stats.R b/R/column_stats.R index 706057da..4e606730 100644 --- a/R/column_stats.R +++ b/R/column_stats.R 
@@ -1,5 +1,5 @@ calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = valid_round_type, exclude_visits, - var_names = c("AVAL", "CHG", "BASE")) { + var_names = c("AVAL", "CHG", "BASE")) { round_type <- match.arg(round_type) if (is.na(decimal)) { decimal <- 0 @@ -11,10 +11,13 @@ calc_one_visit <- function(datvec, decimal, statnm, visit, varnm, round_type = v switch(statnm, N = length(stats::na.omit(datvec)), SE = round_fmt(stats::sd(datvec) / sqrt(length(stats::na.omit(datvec))), - decimal + 2, round_type = round_type), + decimal + 2, + round_type = round_type + ), SD = round_fmt(stats::sd(datvec), decimal + 2, round_type = round_type), Mean = round_fmt(mean(datvec), decimal + 1, - round_type = round_type), + round_type = round_type + ), mean_sd = paste0( round_fmt(mean(datvec), decimal + 1, round_type = round_type), " (", diff --git a/R/jjcsformats.R b/R/jjcsformats.R index e1e35513..cfd04ca8 100644 --- a/R/jjcsformats.R +++ b/R/jjcsformats.R @@ -47,8 +47,9 @@ jjcsformat_xx <- function( stop("Error: jjcsformat_xx do not use xxx. in input str, replace by xx. 
instead.") } - if (identical(str, "default")) + if (identical(str, "default")) { return(str) + } if (is_valid_format(str)) { rtable_format <- str diff --git a/tests/testthat/test-column_stats.R b/tests/testthat/test-column_stats.R index 7bd9912c..557a59de 100644 --- a/tests/testthat/test-column_stats.R +++ b/tests/testthat/test-column_stats.R @@ -248,7 +248,7 @@ test_that("column_stats handles iec round_type correctly", { ) expect_equal(result_SAS, "25.35") - # Compare to SAS rounding + # Compare to SAS rounding result_R_mod <- calc_one_visit( df$AVAL[df$AVISIT == "Week 1"], 1, From ddbce8a03c73fabddafd419382bb36e46ad090e5 Mon Sep 17 00:00:00 2001 From: munoztd0 <david.munoztord@mailbox.org> Date: Wed, 17 Dec 2025 14:11:33 +0100 Subject: [PATCH 49/49] fix: inheritParams in WORDLIST --- inst/WORDLIST | 1 - 1 file changed, 1 deletion(-) diff --git a/inst/WORDLIST b/inst/WORDLIST index 4a0e8bee..1c144cfa 100644 --- a/inst/WORDLIST +++ b/inst/WORDLIST @@ -81,7 +81,6 @@ ie IEC iec imputeObj -inheritParams inriskdiffcol insightsengineering jjcs