Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .renvignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@

jasp_dev_work_dir/
33 changes: 19 additions & 14 deletions R/unidimensionalReliabilityBayesian.R
Original file line number Diff line number Diff line change
Expand Up @@ -566,17 +566,18 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
if (is.null(out))
out <- list()

modelUse <- model[[if (options[["coefficientType"]] == "unstandardized") "gibbsSamp" else "gibbsCor"]]
# split-half always uses covariance matrices; standardized flag toggles SB vs FR inside .splithalfCor
isStd <- options[["coefficientType"]] == "standardized"

if (options[["scaleSplithalf"]] && is.null(model[["empty"]]) && !is.null(modelUse)) {
if (options[["scaleSplithalf"]] && is.null(model[["empty"]]) && !is.null(model[["gibbsSamp"]])) {

nit <- ncol(dataset)
splits <- split(seq_len(nit), 1:2)
startProgressbar(model[["progressbarLength"]])
out[["samp"]] <- matrix(NA, nrow(modelUse), ncol(modelUse))
for (i in seq_len(nrow(model[["gibbsCor"]]))) {
for (j in seq_len(ncol(model[["gibbsCor"]]))) {
out[["samp"]][i, j] <- .splithalfCor(modelUse[i, j, , ], splits, progressbarTick)
out[["samp"]] <- matrix(NA, nrow(model[["gibbsSamp"]]), ncol(model[["gibbsSamp"]]))
for (i in seq_len(nrow(model[["gibbsSamp"]]))) {
for (j in seq_len(ncol(model[["gibbsSamp"]]))) {
out[["samp"]][i, j] <- .splithalfCor(model[["gibbsSamp"]][i, j, , ], splits, standardized = isStd, callback = progressbarTick)
}
}

Expand Down Expand Up @@ -611,9 +612,11 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
if (is.null(out[["itemSamp"]])) {
startProgressbar(model[["progressbarLength"]] * ncol(dataset))

modelUse <- model[[if (options[["coefficientType"]] == "unstandardized") "gibbsSamp" else "gibbsCor"]]
# split-half always uses covariance matrices; standardized flag toggles SB vs FR
isStd <- options[["coefficientType"]] == "standardized"

out[["itemSamp"]] <- .BayesianItemDroppedStats(modelUse, .splithalfCor, progressbarTick, splithalf = TRUE)
out[["itemSamp"]] <- .BayesianItemDroppedStats(model[["gibbsSamp"]], .splithalfCor, progressbarTick,
splithalf = TRUE, standardized = isStd)
}

if (options[["samplesSavingDisabled"]])
Expand Down Expand Up @@ -1292,7 +1295,8 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
startProgressbar(4e3)
}
prior <- .samplePrior(n.item, nm, progressbarTick, options[["inverseWishartPriorScale"]], options[["inverseWishartPriorDf"]],
options[["inverseGammaPriorShape"]], options[["inverseGammaPriorScale"]], options[["normalPriorMean"]])
options[["inverseGammaPriorShape"]], options[["inverseGammaPriorScale"]], options[["normalPriorMean"]],
standardized = options[["coefficientType"]] == "standardized")

probsPrior[i] <- sum(prior[["y"]][poslow:end]) / sum(prior[["y"]]) -
sum(prior[["y"]][poshigh:end]) / sum(prior[["y"]])
Expand Down Expand Up @@ -1455,7 +1459,8 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
startProgressbar(4e3)
}
prior <- .samplePrior(n.item, nm, progressbarTick, options[["inverseWishartPriorScale"]], options[["inverseWishartPriorDf"]],
options[["inverseGammaPriorShape"]], options[["inverseGammaPriorScale"]], options[["normalPriorMean"]])
options[["inverseGammaPriorShape"]], options[["inverseGammaPriorScale"]], options[["normalPriorMean"]],
standardized = options[["coefficientType"]] == "standardized")
} else {
prior <- NULL
}
Expand Down Expand Up @@ -1859,7 +1864,7 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
}


.samplePrior <- function(k, estimate, callback = function(){}, k0, df0, a0, b0, m0) {
.samplePrior <- function(k, estimate, callback = function(){}, k0, df0, a0, b0, m0, standardized = FALSE) {

n_samp <- 2e3

Expand Down Expand Up @@ -1907,15 +1912,15 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
if (estimate == "scaleSplithalf") {
nit <- k
splits <- split(seq_len(nit), 1:2)
priorsplithalf<- apply(m, MARGIN = 1, .splithalfCor, splits = splits, callback)
priorsplithalf <- apply(m, MARGIN = 1, .splithalfCor, splits = splits, standardized = standardized, callback = callback)
out <- density(priorsplithalf, from = 0, to = 1, n = 512)
return(out)

}

}

.BayesianItemDroppedStats <- function(cov_samp, f1 = function(){}, callback = function(){}, splithalf = FALSE) {
.BayesianItemDroppedStats <- function(cov_samp, f1 = function(){}, callback = function(){}, splithalf = FALSE, standardized = FALSE) {

dd <- dim(cov_samp)
nit <- dd[3] - 1
Expand All @@ -1924,7 +1929,7 @@ unidimensionalReliabilityBayesian <- function(jaspResults, dataset, options) {
cov_samp <- array(cov_samp, c(dd[1] * dd[2], dd[3], dd[3]))
if (splithalf) {
for (i in seq_len(dd[3])) {
out[, i] <- apply(cov_samp[, -i, -i], c(1), f1, callback = callback, splits = splits)
out[, i] <- apply(cov_samp[, -i, -i], c(1), f1, callback = callback, splits = splits, standardized = standardized)
}
} else {
for (i in seq_len(dd[3])) {
Expand Down
180 changes: 142 additions & 38 deletions R/unidimensionalReliabilityFrequentist.R
Original file line number Diff line number Diff line change
Expand Up @@ -446,13 +446,10 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)

nit <- ncol(dataset)
splits <- split(seq_len(nit), 1:2)
if (options[["coefficientType"]] == "unstandardized") {
for (i in seq_len(options[["bootstrapSamples"]])) {
out[["samp"]][i] <- .splithalfCor(model[["bootSamp"]][i, , ], splits, progressbarTick)
}
} else { # either we have the bootstrapped cor samples from the standardized coefficients or we have them through
# the splithalf method
out[["samp"]] <- apply(model[["bootCor"]], 1, .splithalfCor, splits = splits)
isStd <- options[["coefficientType"]] == "standardized"
# split-half bootstrap always uses covariance matrices (bootSamp)
for (i in seq_len(options[["bootstrapSamples"]])) {
out[["samp"]][i] <- .splithalfCor(model[["bootSamp"]][i, , ], splits, standardized = isStd, callback = progressbarTick)
}
}
}
Expand All @@ -476,18 +473,19 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)

if (options[["itemDeletedSplithalf"]] && is.null(model[["empty"]]) && options[["intervalMethod"]] == "bootstrapped") {

type <- ifelse(options[["coefficientType"]] == "unstandardized", "bootSamp", "bootCor")

startProgressbar(options[["bootstrapSamples"]] * ncol(dataset))
jaspBase::.setSeedJASP(options)

isStd <- options[["coefficientType"]] == "standardized"
nit <- ncol(dataset) - 1
splits <- split(seq_len(nit), 1:2)

out[["itemSamp"]] <- .frequentistItemDroppedStats(covSamp = model[[type]],
# split-half bootstrap always uses covariance matrices (bootSamp)
out[["itemSamp"]] <- .frequentistItemDroppedStats(covSamp = model[["bootSamp"]],
f1 = .splithalfCor,
callback = progressbarTick,
splits = splits)
splits = splits,
standardized = isStd)

if (options[["samplesSavingDisabled"]])
return(out)
Expand Down Expand Up @@ -947,16 +945,16 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
if (options[["scaleSplithalf"]]) {
nit <- ncol(dataset)
splits <- split(seq_len(nit), 1:2)
out[["est"]][["scaleSplithalf"]] <- .splithalfData(dtUse, splits = splits, useCase = model[["use.cases"]])
isStd <- options[["coefficientType"]] == "standardized"
# split-half always uses raw data; standardized = Spearman-Brown, unstandardized = Flanagan-Rulon
out[["est"]][["scaleSplithalf"]] <- .splithalfData(dataset, splits = splits, useCase = model[["use.cases"]], standardized = isStd)
if (options[["intervalMethod"]] == "bootstrapped") {
samp <- model[["scaleSplithalf"]][["samp"]]
out[["conf"]][["scaleSplithalf"]] <- quantile(samp, probs = c((1 - ciValue) / 2, 1 - (1 - ciValue) / 2), na.rm = TRUE)
out[["se"]][["scaleSplithalf"]] <- sd(samp, na.rm = TRUE)
} else { # interval analytic
partSums1 <- rowSums(dtUse[, splits[[1]], drop = FALSE])
partSums2 <- rowSums(dtUse[, splits[[2]], drop = FALSE])

out[["se"]][["scaleSplithalf"]] <- .seSplithalf(partSums1, partSums2, model[["use.cases"]])
out[["se"]][["scaleSplithalf"]] <- .seSplithalf(dataset, splits, standardized = isStd, scaleThreshold = options[["hiddenScaleThreshold"]])
Copy link

Copilot AI Feb 19, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The split-half coefficient calculation uses raw dataset without considering model[["use.cases"]] for missing data handling in the analytic SE calculation (.seSplithalf), while the point estimate correctly uses model[["use.cases"]]. This inconsistency could lead to mismatched dimensions or incorrect SE when there are missing values. Consider ensuring both use the same data or use cases approach.

Suggested change
out[["se"]][["scaleSplithalf"]] <- .seSplithalf(dataset, splits, standardized = isStd, scaleThreshold = options[["hiddenScaleThreshold"]])
out[["se"]][["scaleSplithalf"]] <- .seSplithalf(dtUse, splits, standardized = isStd, scaleThreshold = options[["hiddenScaleThreshold"]])

Copilot uses AI. Check for mistakes.
Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

no, thats not correct. The split-half coefficient for the standardized case operates on the raw data with a different formula, if we used dtUse it would use the standardized data.

out[["conf"]][["scaleSplithalf"]] <- out[["est"]][["scaleSplithalf"]] + c(-1, 1) * out[["se"]][["scaleSplithalf"]] * qnorm(1 - (1 - ciValue) / 2)
}
}
Expand All @@ -969,10 +967,13 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
out[["conf"]][["averageInterItemCorrelation"]] <- quantile(samp, probs = c((1 - ciValue) / 2, 1 - (1 - ciValue) / 2), na.rm = TRUE)
out[["se"]][["averageInterItemCorrelation"]] <- sd(samp, na.rm = TRUE)
} else { # interval analytic
# TODO: what is the SE of the average interitem correlation?
out[["se"]][["averageInterItemCorrelation"]] <- NA
if (model[["pairwise"]]) {
out[["se"]][["averageInterItemCorrelation"]] <- NA
out[["error"]][["averageInterItemCorrelation"]] <- gettext("The analytic confidence interval is not available for the average interitem correlation when data contain missings and pairwise complete observations are used. Try changing to 'Delete listwise' within 'Advanced Options'.")
} else {
out[["se"]][["averageInterItemCorrelation"]] <- .seAverageInterItemCor(dtUse, scaleThreshold = options[["hiddenScaleThreshold"]])
}
out[["conf"]][["averageInterItemCorrelation"]] <- out[["est"]][["averageInterItemCorrelation"]] + c(-1, 1) * out[["se"]][["averageInterItemCorrelation"]] * qnorm(1 - (1 - ciValue) / 2)
out[["error"]][["averageInterItemCorrelation"]] <- gettext("The standard error of the average interitem correlation is not available. ")
}
}

Expand Down Expand Up @@ -1170,19 +1171,17 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
out[["lower"]][["itemDeletedSplithalf"]] <- c(NA, NA)
out[["upper"]][["itemDeletedSplithalf"]] <- c(NA, NA)
} else {
for (i in seq_len(ncol(dtUse))) {
dtCut <- dtUse[, -i, drop = FALSE]
isStd <- options[["coefficientType"]] == "standardized"
for (i in seq_len(ncol(dataset))) {
dtCut <- dataset[, -i, drop = FALSE]
nit <- ncol(dtCut)
splits <- split(seq_len(nit), 1:2)
est <- .splithalfData(dtCut, splits = splits, useCase = model[["use.cases"]])
est <- .splithalfData(dtCut, splits = splits, useCase = model[["use.cases"]], standardized = isStd)
out[["est"]][["itemDeletedSplithalf"]][i] <- est

if (options[["intervalMethod"]] == "analytic") {

partSums1 <- rowSums(dtCut[, splits[[1]]])
partSums2 <- rowSums(dtCut[, splits[[2]]])

se <- .seSplithalf(partSums1, partSums2, model[["use.cases"]])
se <- .seSplithalf(dtCut, splits, standardized = isStd, scaleThreshold = options[["hiddenScaleThreshold"]])
conf <- est + c(-1, 1) * se * qnorm(1 - (1 - ciValue) / 2)
out[["lower"]][["itemDeletedSplithalf"]][i] <- conf[1]
out[["upper"]][["itemDeletedSplithalf"]][i] <- conf[2]
Expand Down Expand Up @@ -1573,13 +1572,14 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
f1 = function(){},
callback = function(){},
missing = NULL,
splits = NULL) {
splits = NULL,
standardized = FALSE) {

dd <- dim(covSamp)
out <- matrix(0, dd[1], dd[3])
if (!is.null(splits)) { # split half
for (i in seq_len(dd[3])) {
out[, i] <- apply(covSamp[, -i, -i], c(1), f1, callback = callback, splits = splits)
out[, i] <- apply(covSamp[, -i, -i], c(1), f1, callback = callback, splits = splits, standardized = standardized)
}
} else {
if (!is.null(missing)) { # cfa
Expand Down Expand Up @@ -2040,17 +2040,69 @@ unidimensionalReliabilityFrequentist <- function(jaspResults, dataset, options)
}


.splithalfData <- function(X, splits, useCase) {
# SE of the average inter-item correlation via the multivariate delta method
# on vec(Sigma).
#
# The average inter-item correlation is
#   r_bar = (1 / (J*(J-1))) * sum_{i != j} C_ij / sqrt(C_ii * C_jj),
# where C = Var(X). The gradient w.r.t. vec(C) follows from differentiating
# each r_ij = C_ij / sqrt(C_ii * C_jj) w.r.t. C_ab and averaging.
#
# @param X data matrix (rows = observations, columns = items).
# @param VC optional variance matrix of vec(C-hat); when NULL it is estimated
#   from the data (Wishart-based for continuous-looking items, .varCM otherwise).
# @param scaleThreshold maximum number of distinct observed values for an item
#   to be treated as categorical when estimating VC.
# @return the delta-method standard error (scalar), or NA_real_ when an item
#   variance is (near) zero and the correlation gradient is undefined.
.seAverageInterItemCor <- function(X, VC = NULL, scaleThreshold = 10) {
  J <- ncol(X)
  if (is.null(VC)) {
    # number of distinct observed values per item decides which estimator of
    # Var(vec(C-hat)) is appropriate
    levs <- vapply(as.data.frame(X),
                   function(col) length(unique(col[!is.na(col)])),
                   integer(1))
    if (any(levs > scaleThreshold)) {
      VC <- .varVCwishart(stats::var(X), nrow(X))
    } else {
      VC <- .varCM(X)
    }
  }

  C <- stats::var(X)
  dC <- diag(C)
  # a zero (or negative, from pathological input) item variance makes the
  # gradient terms 1/sqrt(dC[a]*dC[b]) undefined
  if (any(!is.finite(dC)) || any(dC < .Machine$double.eps))
    return(NA_real_)

  R <- stats::cov2cor(C)
  m <- J * (J - 1) # number of off-diagonal pairs (both triangles)

  # J x J gradient matrix: entry (a, b) = d(r_bar) / d(C_ab).
  # Off-diagonal (a != b): only r_ab depends on C_ab (as numerator), with
  #   d(r_ab)/d(C_ab) = 1 / sqrt(C_aa * C_bb); m counts both triangles.
  # Diagonal: d(r_aj)/d(C_aa) = -R_aj / (2 * C_aa); each pair appears twice in
  #   the double sum, giving -sum_{j != a} R_aj / (m * C_aa).
  Gmat <- 1 / (m * sqrt(outer(dC, dC)))
  diag(Gmat) <- -(rowSums(R) - 1) / (m * dC) # rowSums(R) - 1 = sum_{j != a} R_aj

  # vec(Gmat) in column-major order matches vec(C)
  G <- matrix(as.vector(Gmat), nrow = 1)
  V <- G %*% VC %*% t(G)
  return(sqrt(as.numeric(V)))
}


# Split-half reliability computed from raw data.
#
# Items are divided into two halves by `splits`; reliability is computed from
# the two half-sum scores:
# - standardized = TRUE: Spearman-Brown coefficient 2r / (1 + r), where r is
#   the Pearson correlation between the two half sums.
# - standardized = FALSE: Flanagan-Rulon / Guttman split-half,
#   4 * Cov(X1, X2) / Var(X1 + X2).
#
# @param X data matrix (rows = observations, columns = items).
# @param splits list of two integer vectors with the column indices of each half.
# @param useCase `use` argument forwarded to cor()/cov() for missing-data handling.
# @param standardized switch between Spearman-Brown (TRUE) and Flanagan-Rulon (FALSE).
# @return the split-half reliability, or NA_real_ when it is undefined
#   (zero total-score variance, or r == -1 for Spearman-Brown).
.splithalfData <- function(X, splits, useCase, standardized = FALSE) {

  partSums1 <- rowSums(X[, splits[[1]], drop = FALSE])
  partSums2 <- rowSums(X[, splits[[2]], drop = FALSE])

  if (standardized) {
    # Spearman-Brown coefficient: 2r/(1+r) on the raw half-sum correlation
    r <- cor(partSums1, partSums2, use = useCase)
    if (is.na(r) || abs(1 + r) < .Machine$double.eps)
      return(NA_real_) # r == -1 (or undefined r): Spearman-Brown undefined
    rsh <- (2 * r) / (1 + r)
  } else {
    # Flanagan-Rulon / Guttman split-half: 4 * Cov(X1, X2) / Var(X)
    totalScore <- partSums1 + partSums2
    varTotal <- var(totalScore, na.rm = TRUE)
    if (is.na(varTotal) || varTotal < .Machine$double.eps)
      return(NA_real_) # (near-)constant total score: reliability undefined
    rsh <- 4 * cov(partSums1, partSums2, use = useCase) / varTotal
  }
  return(rsh)
}

.splithalfCor <- function(R, splits, callback = function(){}) {
# Split-half reliability computed from a covariance matrix.
#
# The variances of the two half sums and their covariance are obtained by
# summing the corresponding blocks of the item covariance matrix.
#
# @param R item covariance matrix.
# @param splits list of two integer vectors with the item indices of each half.
# @param standardized TRUE = Spearman-Brown 2r/(1+r); FALSE = Flanagan-Rulon
#   4 * Cov(X_A, X_B) / Var(X).
# @param callback progress-bar tick, invoked once per call.
# @return the split-half reliability, or NA_real_ when it is undefined.
.splithalfCor <- function(R, splits, standardized = FALSE, callback = function(){}) {

  R_AA <- R[splits[[1]], splits[[1]]]
  R_BB <- R[splits[[2]], splits[[2]]]
  R_AB <- R[splits[[1]], splits[[2]]]

  # Var(sum of half A), Var(sum of half B), Cov(half A, half B)
  Var_XA <- sum(R_AA)
  Var_XB <- sum(R_BB)
  Cov_XA_XB <- sum(R_AB)

  if (standardized) {
    # Spearman-Brown: correlation between halves, then 2r/(1+r)
    r <- Cov_XA_XB / sqrt(Var_XA * Var_XB)
    if (is.na(r) || abs(1 + r) < .Machine$double.eps) {
      rsh <- NA_real_ # r == -1 (or undefined r): coefficient undefined
    } else {
      rsh <- (2 * r) / (1 + r)
    }
  } else {
    # Flanagan-Rulon / Guttman split-half: 4 * Cov(X1, X2) / Var(X)
    Var_X <- Var_XA + Var_XB + 2 * Cov_XA_XB
    if (is.na(Var_X) || abs(Var_X) < .Machine$double.eps) {
      rsh <- NA_real_
    } else {
      rsh <- 4 * Cov_XA_XB / Var_X
    }
  }

  callback()
  return(rsh)
}

.seSplithalf <- function(x, y, useObs){
k <- cor(x, y, use = useObs)
seK <- .seCor(k, length(x))
sh <- 2 * k / (1 + k)
return((sh/k - sh/(1 + k)) * seK)
# Delta-method standard error of the split-half coefficient.
#
# Computes the gradient of the split-half coefficient w.r.t. vec(Sigma) and
# combines it with VC, the (estimated) variance of the sample covariance
# matrix — analogous to .seLambda1 / .seLambda2.
#
# @param X data matrix (rows = observations, columns = items).
# @param splits list of two integer vectors with the item indices of each half.
# @param standardized TRUE = Spearman-Brown, FALSE = Flanagan-Rulon.
# @param VC optional variance matrix of vec(Sigma-hat); estimated when NULL.
# @param scaleThreshold maximum number of distinct observed values for an item
#   to be treated as categorical when estimating VC.
# @return the delta-method standard error, or NA_real_ in degenerate cases
#   (zero half variance, r == -1, or zero total variance).
.seSplithalf <- function(X, splits, standardized = FALSE, VC = NULL, scaleThreshold = 10) {

  J <- ncol(X)

  if (is.null(VC)) {
    # number of distinct observed values per item decides which estimator of
    # Var(vec(Sigma-hat)) is appropriate
    levs <- vapply(as.data.frame(X),
                   function(col) length(unique(col[!is.na(col)])),
                   integer(1))
    if (any(levs > scaleThreshold)) {
      VC <- .varVCwishart(stats::var(X, use = "pairwise.complete.obs"), nrow(X))
    } else {
      VC <- .varCM(X)
    }
  }

  C <- stats::var(X, use = "pairwise.complete.obs")
  vecC <- as.vector(C) # J^2 elements, column-major

  # indicator vectors (length J^2) for block membership within vec(Sigma)
  inA <- seq_len(J) %in% splits[[1]]
  inB <- seq_len(J) %in% splits[[2]]
  mAB <- as.vector(outer(inA, inB)) # entry (i, j): i in A, j in B
  mAA <- as.vector(outer(inA, inA)) # i, j in A
  mBB <- as.vector(outer(inB, inB)) # i, j in B

  C_AB <- sum(mAB * vecC) # Cov(X_A, X_B)
  V_A <- sum(mAA * vecC)  # Var(X_A)
  V_B <- sum(mBB * vecC)  # Var(X_B)
  S <- sum(vecC)          # Var(X) = V_A + V_B + 2 * C_AB

  if (standardized) {
    # Spearman-Brown: SB = 2r/(1+r), r = C_AB / sqrt(V_A * V_B)
    sqrtVAVB <- sqrt(V_A * V_B)
    if (!is.finite(sqrtVAVB) || sqrtVAVB < .Machine$double.eps)
      return(NA_real_) # degenerate half variance: r and SE undefined
    r <- C_AB / sqrtVAVB
    if (is.na(r) || (1 + r)^2 < .Machine$double.eps)
      return(NA_real_) # r == -1: Spearman-Brown gradient undefined
    # dr/d(vecSigma)
    dr <- mAB / sqrtVAVB - (r / (2 * V_A)) * mAA - (r / (2 * V_B)) * mBB
    G <- matrix((2 / (1 + r)^2) * dr, nrow = 1)
  } else {
    # Flanagan-Rulon: FR = 4 * C_AB / S
    # dFR/d(vecSigma) = 4 * (mAB * S - C_AB) / S^2
    if (is.na(S) || abs(S) < sqrt(.Machine$double.eps))
      return(NA_real_) # (near-)zero total variance: SE undefined
    G <- matrix(4 * (mAB * S - C_AB) / S^2, nrow = 1)
  }

  V <- G %*% VC %*% t(G)
  return(sqrt(as.numeric(V)))
}

.seCor <- function(r, n) { # Bonett (2008)
Expand Down
8 changes: 8 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -1 +1,9 @@
# jaspReliability
<div align="right">

[![Unit Tests](https://github.com/jasp-stats/jaspReliability/actions/workflows/unittests.yml/badge.svg)](https://github.com/jasp-stats/jaspReliability/actions/workflows/unittests.yml)
[![codecov](https://codecov.io/gh/jasp-stats/jaspReliability/branch/master/graph/badge.svg)](https://codecov.io/gh/jasp-stats/jaspReliability)
<br>
<b>Maintainer:</b> <a href="https://github.com/juliuspfadt/">Julius Pfadt</a>

</div>
Loading