diff --git a/DESCRIPTION b/DESCRIPTION index 4a61ec6..e567bd2 100644 --- a/DESCRIPTION +++ b/DESCRIPTION @@ -1,14 +1,14 @@ Package: cocons Type: Package Title: Covariate-Based Covariance Functions for Nonstationary Spatial Modeling -Version: 0.1.4 +Version: 0.1.5 Author: Federico Blasi [aut, cre] (), Reinhard Furrer [ctb] () -Authors@R: c(person("Federico", "Blasi", email = "federico.blasi@gmail.com", +Authors@R: c(person("Federico", "Blasi", email = "federicoblasi at gmail.com", role = c("aut", "cre"), comment = c(ORCID = "0000-0001-9337-7154")), person("Reinhard", "Furrer", role = c("ctb"), comment = c(ORCID = "0000-0002-6319-2332")) ) -Maintainer: Federico Blasi +Maintainer: Federico Blasi Description: Estimation, prediction, and simulation of nonstationary Gaussian process with modular covariate-based covariance functions. Sources of nonstationarity, such as spatial mean, variance, geometric anisotropy, smoothness, and nugget, can be considered based on spatial characteristics. An induced compact-supported nonstationary covariance function is provided, enabling fast and memory-efficient computations when handling densely sampled domains. diff --git a/NEWS.md b/NEWS.md index 5b968a1..02046bb 100644 --- a/NEWS.md +++ b/NEWS.md @@ -1,3 +1,10 @@ +## cocons 0.1.5 + +- Vignette updated +- changed default max.iters from 200 to 500. +- now `coco` objects store information about optim.control arguments for full reproducibility. +- introduce a two-step optimization approach for automatic model selection. See `help(coco)`. 
+ ## cocons 0.1.4 - Documentation polished diff --git a/R/RcppExports.R b/R/RcppExports.R index f94d126..8d9c422 100644 --- a/R/RcppExports.R +++ b/R/RcppExports.R @@ -1,6 +1,16 @@ # Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 +#' smoothed-L1 penalization over the covariate-driven parameters +#' +#' @param x a vector of parameters to penalize +#' @param lambda penalization hyperparameter +#' @param alpha hyperparameter to approximate L1 penalization. By default 1e6 provides a decent approximation to L1, while also being a differentiable function +#' @return penalization +sumsmoothlone <- function(x, lambda, alpha = 1e6) { + .Call(`_cocons_sumsmoothlone`, x, lambda, alpha) +} + #' Dense covariance function (difference parameterization) #' #' @param theta vector of parameters diff --git a/R/checkFunctions.R b/R/checkFunctions.R index 0e0eba7..06997d1 100644 --- a/R/checkFunctions.R +++ b/R/checkFunctions.R @@ -17,6 +17,10 @@ optim.control$control$factr <- to_update$control$factr } + if(is.null(optim.control$control$ndeps)){ + optim.control$control$ndeps <- to_update$control$ndeps + } + if(is.null(optim.control$control$trace)){ optim.control$control$trace <- to_update$control$trace } @@ -269,9 +273,21 @@ stop("smooth limits not specified.") } - if (!is.null(info$lambda)) { - if(info$lambda < 0){ - stop("lambda must be non-negative.") + if (!is.null(info$lambda.reg)) { + if(info$lambda.reg < 0){ + stop("lambda.reg must be non-negative.") + } + } + + if (!is.null(info$lambda.Sigma)) { + if(info$lambda.Sigma < 0){ + stop("lambda.Sigma must be non-negative.") + } + } + + if (!is.null(info$lambda.betas)) { + if(info$lambda.betas < 0){ + stop("lambda.betas must be non-negative.") } } @@ -451,11 +467,21 @@ .cocons.getPen <- function(n, lambda, theta_list, smooth.limits){ - return(2 * n * lambda * exp(theta_list$scale[1]) * - sqrt(((smooth.limits[2]-smooth.limits[1])/ - (1 + 
exp(-theta_list$smooth[1])) + - smooth.limits[1])) - ) + if(T){ + summ <- lambda[3] * exp(theta_list$scale[1]) * + sqrt(((smooth.limits[2]-smooth.limits[1])/ + (1 + exp(-theta_list$smooth[1])) + + smooth.limits[1])) + sumsmoothlone(x = unlist(theta_list[[1]][-1]), lambda[2]) + } + + for(ii in 2:6){ + + lambda_i <- sumsmoothlone(x = unlist(theta_list[[ii]][-1]), lambda[1]) + summ <- summ + lambda_i + + } + + return(2 * n * summ) } @@ -478,3 +504,98 @@ return(namesToUpdate) } + + +.cocons.update.coco.first.step <- function(coco.object, output, boundaries){ + + # Update model structure + + par_pos <- getDesignMatrix(coco.object@model.list,data = coco.object@data)$par.pos + + number_parameters_models <- lapply(par_pos,sum) + + new_formulas_list <- coco.object@model.list + + coco.object@output <- output + + parss <- getEstims(coco.object) + + # What if one estimate is exactly 0 ? + # Check models formulas keeping environment + + for(ii in 1:7){ + + if(!is.formula(coco.object@model.list[[ii]])){ + next + } + + to_zero <- which(abs(parss[[ii]][par_pos[[ii]]]) <= coco.object@info$sparse.point) + + if(length(to_zero) == 0){ + next + } + + to_zero_covs <- sum(abs(parss[[ii]][par_pos[[ii]]]) <= coco.object@info$sparse.point) + + if( to_zero_covs == (number_parameters_models[[ii]] - 1) || (to_zero_covs == (number_parameters_models[[ii]]))){ + new_formulas_list[[ii]] <- stats::as.formula("~1", env = globalenv()) + next + } + + # Update formula + + # If the only parameter to drop is the intercept + if(length(to_zero) == 1 && to_zero == 1){ + next + } + + new_terms <- stats::drop.terms(stats::terms(coco.object@model.list[[ii]]), dropx = (to_zero - 1), keep.response = TRUE) + + if(attr(new_terms,"intercept") & (length(attr(new_terms,"term.labels")) > 0)){ + new_terms <- c("1",attr(new_terms, "term.labels")) + } else{ + new_terms <- attr(new_terms, "term.labels") + } + + new_formula <- stats::reformulate(new_terms, response = NULL, env = globalenv()) + new_formulas_list[[ii]] <- 
new_formula + + } + + index_where <- 0 + + vector_logical <- logical(length = length(output$par)) + + for(ii in 1:7){ + + if(!is.formula(coco.object@model.list[[ii]])){ + next + } + + to_zero <- which(abs(parss[[ii]][par_pos[[ii]]]) <= coco.object@info$sparse.point) + + if(any(to_zero == 1) && (length(to_zero) > 1)){ + to_zero <- to_zero[-1] + } + + if(length(to_zero) == 1 && to_zero == 1){ + index_where <- index_where + number_parameters_models[[ii]] + next + } + + index_to_place <- (index_where + 1 ) : (index_where + number_parameters_models[[ii]]) + + vector_logical[index_to_place][to_zero] <- TRUE + + index_where <- index_where + number_parameters_models[[ii]] + + } + + boundaries_new <- lapply(boundaries, function(x) x[!vector_logical]) + + coco.object@info$boundaries <- boundaries_new + + coco.object@model.list <- new_formulas_list + + return(coco.object) +} diff --git a/R/cocons.R b/R/cocons.R index 3fb1aa4..02124ca 100644 --- a/R/cocons.R +++ b/R/cocons.R @@ -26,7 +26,10 @@ #' \eqn{\nu_{l}}, and \eqn{\nu_{u}} are the lower and upper bounds limiting the range of variation of the spatially-varying smoothness, and where \eqn{\boldsymbol{X}_{\ell}} relates to the design matrix defined by the specific models for each of the source of nonstationarity. #' #' Lastly, arguments for the \code{"info"} list argument involve: \itemize{ -#' \item \code{"lambda"}: (\code{numeric}) a positive scalar specifying the regularization parameter. Larger values penalizes highly-smoothed long-tailed covariance functions. +#' \item \code{"lambda.reg"}: (\code{numeric}) a positive scalar specifying the regularization parameter. Larger values regularize highly-smoothed long-tailed covariance functions. +#' \item \code{"lambda.Sigma"}: (\code{numeric}) a positive scalar specifying the penalization parameter for the covariate-driven covariance parameters. 
+#' \item \code{"lambda.betas"}: (\code{numeric}) a positive scalar specifying the penalization parameter for the covariate-driven spatial mean parameters. +#' \item \code{"sparse.point"}: (\code{numeric}) a cutting point for which smaller coefficients in absolute value will be set to zero after the smoothed L1 penalization optimization. Used in combination with \code{lambda.Sigma} and \code{lambda.betas}. By default, it is set to \code{1e-4}. #' \item \code{"smooth.limits"}: (\code{numeric vector}) specifying the range of variation for the spatially varying smoothness (e.g. c(0.5, 2.5)). #' \item \code{"taper"}: (\code{numeric}) specifying the desired taper function from the spam package (only for "sparse" coco objects). #' \item \code{"delta"}: (\code{numeric}) specifying the taper range/scale (only for "sparse" coco objects). @@ -135,8 +138,20 @@ coco <- function(type, .cocons.check.output(output) - if(is.null(info$lambda)){ - info$lambda <- 0 + if(is.null(info$lambda.reg)){ + info$lambda.reg <- 0 + } + + if(is.null(info$lambda.betas)){ + info$lambda.betas <- 0 + } + + if(is.null(info$lambda.Sigma)){ + info$lambda.Sigma <- 0 + } + + if(any(c(info$lambda.betas,info$lambda.Sigma) > 0) && (is.null(info$sparse.point))){ + info$sparse.point <- 1e-4 } if(T){ diff --git a/R/getFunctions.R b/R/getFunctions.R index 28b2ca9..29310ee 100644 --- a/R/getFunctions.R +++ b/R/getFunctions.R @@ -617,6 +617,71 @@ getModelLists <- function(theta, par.pos, type = "diff"){ } } +#' Simple build of boundaries +#' @description provides a set of upper and lower bounds for the L-BFGS-B routine +#' +#' @usage getBoundariesV4(coco.object, lower.bound, upper.bound) +#' @param coco.object \code{(S4)} a coco.object. +#' @param lower.bound \code{(numeric vector)} lower bound for covariate-driven parameters of the nonstationary covariance function. +#' @param upper.bound \code{(numeric vector)} upper bound for covariate-driven parameters of the nonstationary covariance function. 
+#' @returns (\code{list}) a list with boundaries and simple init values for the optim L-BFGS-B routine +#' @author Federico Blasi +#' +getBoundariesV4 <- function(coco.object, lower.bound = 2, upper.bound = 2){ + + designMatrix <- getDesignMatrix(model.list = coco.object@model.list, data = coco.object@data) + + term_labels <- paste("z", "~" , paste(coco.object@model.list$mean)[2]) + + # Skip scaling + if(any(colnames(coco.object@data[,coco.object@info$skip.scale]) %in% + colnames(coco.object@data))){ + tmp_info <- .cocons.setDesignMatrixCat(coco.object, designMatrix = designMatrix) + tmp_values <- tmp_info$tmp_values + mod_DM <- tmp_info$mod_DM + } else { + tmp_values <- cocons::getScale(designMatrix$model.matrix) + mod_DM <- tmp_values$std.covs + } + + coefs_lm <- stats::coef(stats::lm(term_labels,cbind.data.frame('z' = coco.object@z,mod_DM))) + + boundaries_B <- getBoundariesV2(coco.object = coco.object, + mean.limits = c(-Inf, 0, Inf), + std.dev.limits = c(-lower.bound, 0, upper.bound), + scale.limits = c(-lower.bound, 0, upper.bound), + aniso.limits = c(-lower.bound, 0, upper.bound), + tilt.limits = c(-lower.bound, 0, upper.bound), + smooth.limits = c(-lower.bound, 0, upper.bound), + nugget.limits = c(-lower.bound, 0, upper.bound)) + + boundaries_B$theta_init[1:length(coefs_lm)] <- coefs_lm + + first_sd <- which(names(boundaries_B$theta_init) == "std.dev.limits")[1] + n_var <- length(which(names(boundaries_B$theta_init) == "std.dev.limits")) - 1 + + first_range <- which(names(boundaries_B$theta_init) == "scale.limits")[1] + n_range <- length(which(names(boundaries_B$theta_init) == "scale.limits")) - 1 + + first_smooth <- which(names(boundaries_B$theta_init) == "smooth.limits")[1] + n_smooth <- length(which(names(boundaries_B$theta_init) == "smooth.limits")) - 1 + + boundaries_B$theta_init[first_range] <- (log(stats::sd(coco.object@z)) - log(stats::sd(c(stats::dist(coco.object@locs)))))/2 + boundaries_B$theta_init[first_sd] <- (log(stats::sd(coco.object@z)) + 
log(stats::sd(c(stats::dist(coco.object@locs)))))/2 + + boundaries_B$theta_upper[c(first_sd, first_range)] <- c(3 * abs(boundaries_B$theta_init[first_sd]), 3 * abs(boundaries_B$theta_init[first_range])) + boundaries_B$theta_lower[c(first_sd, first_range)] <- c(-3 * abs(boundaries_B$theta_init[first_sd]), -3 * abs(boundaries_B$theta_init[first_range])) + + boundaries_B$theta_upper[first_smooth] <- 3 + boundaries_B$theta_lower[first_smooth] <- -3.5 + + boundaries_B$theta_init[1] <- mean(coco.object@z) + boundaries_B$theta_upper[1] <- boundaries_B$theta_init[1] + 3 * boundaries_B$theta_init[1] + boundaries_B$theta_lower[1] <- boundaries_B$theta_init[1] - 3 * boundaries_B$theta_init[1] + + return(boundaries_B) +} + #' Simple build of boundaries #' @description provides a generic set of upper and lower bounds for the L-BFGS-B routine #' @@ -894,7 +959,7 @@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, smooth.limits = coco.object@info$smooth.limits, n = n, z = coco.object@z, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) } else{f00 <- coco.object@output$value} @@ -925,7 +990,7 @@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, smooth.limits = coco.object@info$smooth.limits, n = n, z = coco.object@z, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) f10 <- cocons::GetNeg2loglikelihood(t10, par.pos = par.pos, locs = coco.object@locs, @@ -933,7 +998,7 @@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, smooth.limits = coco.object@info$smooth.limits, n = n, z = coco.object@z, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) f11 <- cocons::GetNeg2loglikelihood(t11, par.pos = par.pos, locs = coco.object@locs, @@ -941,7 +1006,7 
@@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, smooth.limits = coco.object@info$smooth.limits, n = n, z = coco.object@z, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) 0.5 * ((f11 - f01 - f10 + f00) / (eps*eps)) }) @@ -1009,7 +1074,7 @@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, cholS = cholS, z = coco.object@z, n = n, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) } else{f00 <- coco.object@output$value} @@ -1044,7 +1109,7 @@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, cholS = cholS, z = coco.object@z, n = n, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) f10 <- cocons::GetNeg2loglikelihoodTaper(theta = t10, par.pos = par.pos, @@ -1055,7 +1120,7 @@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, cholS = cholS, z = coco.object@z, n = n, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) f11 <- cocons::GetNeg2loglikelihoodTaper(theta = t11, par.pos = par.pos, @@ -1066,7 +1131,7 @@ getHessian <- function(coco.object, ncores = parallel::detectCores() - 1, cholS = cholS, z = coco.object@z, n = n, - lambda = coco.object@info$lambda) + lambda = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg)) 0.5 * ((f11 - f01 - f10 + f00) / (eps*eps)) diff --git a/R/methods.R b/R/methods.R index fe02e69..66b384b 100644 --- a/R/methods.R +++ b/R/methods.R @@ -570,3 +570,65 @@ setMethod("summary", signature(object = "coco"), } } ) + + + +#' Show Method for Coco Class +#' +#' This method show objects of class 'coco'. 
+#' @name show +#' @aliases show,coco-method +#' @param object (\code{S4}) An object of class 'coco'. +#' @return No return value; prints a brief summary of the 'coco' object to the console. +#' @docType methods +#' @rdname show-methods +#' @author Federico Blasi +#' +setMethod("show", + signature(object = "coco"), + function(object){ + + if(object@type == "dense"){ + + cat(sprintf("%-30s %30s\n", "coco object", object@type)) + cat(sprintf("%-30s %30s\n", "fitted", ifelse(identical(object@output, list()), yes = "no", + no = "yes"))) + + cat(rep("-", 40), "\n") + cat(sprintf("%-30s %30s\n", "dataset dim", paste0(dim(object@data), collapse = ", "))) + + # number of parameters + + cat(sprintf("%-30s %30s\n", "model parameters", .cocons.get.npars(object))) + + # covariates names + + coco_info_light <- getDesignMatrix(model.list = object@model.list, data = object@data[1, , drop = FALSE]) + + cat(sprintf("%-30s %10s", "covariates ", paste0(colnames(coco_info_light$model.matrix),collapse = ", "))) + + } + + if(object@type == "sparse"){ + + cat(sprintf("%-30s %30s\n", "coco object", object@type)) + cat(sprintf("%-30s %30s\n", "fitted", ifelse(identical(object@output, list()), yes = "no", + no = "yes"))) + + cat(rep("-", 40), "\n") + cat(sprintf("%-30s %30s\n", "dataset dim", paste0(dim(object@data), collapse = ", "))) + + # number of parameters + + cat(sprintf("%-30s %30s\n", "model parameters", .cocons.get.npars(object))) + + # covariates names + + coco_info_light <- getDesignMatrix(model.list = object@model.list, data = object@data[1, , drop = FALSE]) + + cat(sprintf("%-30s %10s", "covariates ", paste0(colnames(coco_info_light$model.matrix),collapse = ", "))) + + } + + } +) diff --git a/R/neg2loglikelihood.R b/R/neg2loglikelihood.R index a94ef00..cbf8605 100644 --- a/R/neg2loglikelihood.R +++ b/R/neg2loglikelihood.R @@ -13,7 +13,7 @@ #' @param cholS \code{(S4)} Cholesky object from spam. #' @param z \code{(numeric vector)} a vector of observed values. #' @param n \code{(numeric)} dim(z)\[1\]. 
-#' @param lambda \code{(numeric)} regularization parameter. +#' @param lambda \code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order) #' @param safe \code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}. #' @returns value #' @author Federico Blasi @@ -66,7 +66,7 @@ GetNeg2loglikelihoodTaper <- function(theta, par.pos, ref_taper, locs, #' @param cholS \code{(S4)} Cholesky object from spam. #' @param z \code{(numeric vector)} a vector of observed values. #' @param n \code{(integer)} dim(z)\[1\]. -#' @param lambda \code{(numeric)} regularization parameter. +#' @param lambda \code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order) #' @param safe \code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}. #' @returns \code{(numeric)} #' @author Federico Blasi @@ -120,7 +120,7 @@ GetNeg2loglikelihoodTaperProfile <- function(theta, par.pos, ref_taper, locs, #' @param z \code{(numeric vector)} a vector of observed values. #' @param n \code{(integer)} dim(z)\[1\]. #' @param x_betas \code{(matrix) or (data.frame)} design matrix for the spatial mean. -#' @param lambda \code{(numeric)} regularization parameter. +#' @param lambda \code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order) #' @param safe \code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}. #' @returns value #' @author Federico Blasi @@ -176,7 +176,7 @@ GetNeg2loglikelihoodProfile <- function(theta, par.pos, locs, x_covariates, #' @param smooth.limits \code{(numeric vector)} smooth.limits. #' @param z \code{(numeric vector)} a vector of observed values. #' @param n \code{(integer)} dim(z)\[1\]. -#' @param lambda \code{(numeric)} regularization parameter. 
+#' @param lambda \code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order) #' @param safe \code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}. #' @returns value #' @author Federico Blasi @@ -234,7 +234,7 @@ GetNeg2loglikelihood <- function(theta, #' @param smooth.limits \code{(numeric vector)} smooth.limits. #' @param z \code{(numeric vector)} a vector of contrasts. #' @param n \code{(integer)} dim(z)\[1\]. -#' @param lambda \code{(numeric)} regularization parameter. +#' @param lambda \code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order) #' @param safe \code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}. #' @returns value #' @author Federico Blasi diff --git a/R/optim.R b/R/optim.R index b6db356..015123d 100644 --- a/R/optim.R +++ b/R/optim.R @@ -125,6 +125,109 @@ cocoOptim <- function(coco.object, boundaries = list(), if (coco.object@type == "dense") { + if(any(c(coco.object@info$lambda.betas, coco.object@info$lambda.Sigma) > 0) && optim.type == "ml"){ + + if(is.null(coco.object@info$sparse.point)){ + coco.object@info$sparse.point <- getOption("cocons.sparse.point") + } + + parallel::setDefaultCluster(cl = cl) + parallel::clusterEvalQ(cl, library("cocons")) + + args_optim <- list( + "fn" = cocons::GetNeg2loglikelihood, + "method" = "L-BFGS-B", + "lower" = boundaries$theta_lower, + "par" = boundaries$theta_init, + "upper" = boundaries$theta_upper, + "n" = dim(coco.object@z)[1], + "smooth.limits" = coco.object@info$smooth.limits, + "z" = coco.object@z, + "x_covariates" = mod_DM, + "par.pos" = designMatrix$par.pos, + "lambda" = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg), + "locs" = coco.object@locs, + "safe" = safe + ) + + rm(designMatrix) + + # Call optim routine + output_dense <- do.call(what = 
optimParallel::optimParallel, args = c( + args_optim, + optim.control + )) + + coco_pen <- .cocons.update.coco.first.step(coco.object, output_dense, boundaries) + + # .cocons.check.convergence(output_dense, boundaries) + + # Create again design matrix + + if(T){ + + designMatrix <- cocons::getDesignMatrix( + model.list = coco_pen@model.list, + data = coco_pen@data + ) + + # Skip scaling + if(any(colnames(coco_pen@data[,coco_pen@info$skip.scale]) %in% + colnames(coco_pen@data))){ + tmp_info <- .cocons.setDesignMatrixCat(coco_pen, designMatrix = designMatrix) + tmp_values <- tmp_info$tmp_values + mod_DM <- tmp_info$mod_DM + } else { + tmp_values <- cocons::getScale(designMatrix$model.matrix) + mod_DM <- tmp_values$std.covs + } + + args_optim <- list( + "fn" = cocons::GetNeg2loglikelihood, + "method" = "L-BFGS-B", + "lower" = coco_pen@info$boundaries$theta_lower, + "par" = coco_pen@info$boundaries$theta_init, + "upper" = coco_pen@info$boundaries$theta_upper, + "n" = dim(coco_pen@z)[1], + "smooth.limits" = coco_pen@info$smooth.limits, + "z" = coco_pen@z, + "x_covariates" = mod_DM, + "par.pos" = designMatrix$par.pos, + "lambda" = c(0, 0, coco_pen@info$lambda.reg), + "locs" = coco_pen@locs, + "safe" = safe + ) + } + + # + + output_dense_two <- do.call(what = optimParallel::optimParallel, args = c( + args_optim, + optim.control + )) + + coco_pen@output <- output_dense_two + + boundaries_two <- list(theta_init = args_optim$par, + theta_upper = args_optim$upper, + theta_lower = args_optim$lower) + + .cocons.check.convergence(output_dense_two, boundaries_two) + + # END ADDED + + coco_pen@info$optim.control <- optim.control + coco_pen@info$boundaries <- boundaries_two + coco_pen@info$mean.vector <- tmp_values$mean.vector + coco_pen@info$sd.vector <- tmp_values$sd.vector + coco_pen@info$optim.type <- "ml" + coco_pen@info$safe <- safe + coco_pen@info$call <- match.call() + + return(coco_pen) + + } + if(optim.type == "ml"){ parallel::setDefaultCluster(cl = cl) @@ -141,7 +244,7 
@@ cocoOptim <- function(coco.object, boundaries = list(), "z" = coco.object@z, "x_covariates" = mod_DM, "par.pos" = designMatrix$par.pos, - "lambda" = coco.object@info$lambda, + "lambda" = c(0, 0, coco.object@info$lambda.reg), "locs" = coco.object@locs, "safe" = safe ) @@ -157,6 +260,8 @@ cocoOptim <- function(coco.object, boundaries = list(), .cocons.check.convergence(output_dense, boundaries) coco.object@output <- output_dense + + coco.object@info$optim.control <- optim.control coco.object@info$boundaries <- boundaries coco.object@info$mean.vector <- tmp_values$mean.vector @@ -205,7 +310,7 @@ cocoOptim <- function(coco.object, boundaries = list(), reml = (diag(dim(mod_DM)[1]) - mod_DM %*% solve(crossprod(mod_DM),t(mod_DM))) %*% coco.object@z), "x_covariates" = mod_DM, "par.pos" = tmp_par_pos, - "lambda" = coco.object@info$lambda, + "lambda" = c(0, 0, coco.object@info$lambda.reg), "locs" = coco.object@locs, "x_betas" = x_betas, "safe" = safe @@ -243,6 +348,7 @@ cocoOptim <- function(coco.object, boundaries = list(), .cocons.check.convergence(output_dense, tmp_boundaries) + coco.object@info$optim.control <- optim.control coco.object@output <- output_dense coco.object@info$boundaries <- tmp_boundaries coco.object@info$mean.vector <- tmp_values$mean.vector @@ -259,6 +365,118 @@ cocoOptim <- function(coco.object, boundaries = list(), if (coco.object@type == "sparse") { + if(any(c(coco.object@info$lambda.betas, coco.object@info$lambda.Sigma) > 0) && optim.type == "ml"){ + + if(is.null(coco.object@info$sparse.point)){ + coco.object@info$sparse.point <- getOption("cocons.sparse.point") + } + + # taper + ref_taper <- coco.object@info$taper( + spam::nearest.dist(coco.object@locs, delta = coco.object@info$delta, upper = NULL), + theta = c(coco.object@info$delta, 1) + ) + + print(summary(ref_taper)) + + parallel::setDefaultCluster(cl = cl) + parallel::clusterEvalQ(cl, library("cocons")) + parallel::clusterEvalQ(cl, 
library("spam")) + #parallel::clusterEvalQ(cl, library("spam64")) + parallel::clusterEvalQ(cl, options(spam.cholupdatesingular = "error")) + parallel::clusterEvalQ(cl, options(spam.cholsymmetrycheck = FALSE)) + + args_optim <- list( + "fn" = cocons::GetNeg2loglikelihoodTaper, + "method" = "L-BFGS-B", + "lower" = boundaries$theta_lower, + "par" = boundaries$theta_init, + "upper" = boundaries$theta_upper, + "par.pos" = designMatrix$par.pos, + "ref_taper" = ref_taper, + "locs" = coco.object@locs, + "x_covariates" = mod_DM, + "smooth.limits" = coco.object@info$smooth.limits, + "cholS" = spam::chol.spam(ref_taper), + "z" = coco.object@z, + "n" = dim(coco.object@z)[1], + "lambda" = c(coco.object@info$lambda.Sigma, coco.object@info$lambda.betas, coco.object@info$lambda.reg), + "safe" = safe + ) + + # Call optim routine + output_taper <- do.call(what = optimParallel::optimParallel, args = c( + args_optim, + optim.control + )) + + coco_pen <- .cocons.update.coco.first.step(coco.object, output_taper, boundaries) + + # Create again design matrix + + if(T){ + + designMatrix <- cocons::getDesignMatrix( + model.list = coco_pen@model.list, + data = coco_pen@data + ) + + # Skip scaling + if(any(colnames(coco_pen@data[,coco_pen@info$skip.scale]) %in% + colnames(coco_pen@data))){ + tmp_info <- .cocons.setDesignMatrixCat(coco_pen, designMatrix = designMatrix) + tmp_values <- tmp_info$tmp_values + mod_DM <- tmp_info$mod_DM + } else { + tmp_values <- cocons::getScale(designMatrix$model.matrix) + mod_DM <- tmp_values$std.covs + } + + args_optim <- list( + "fn" = cocons::GetNeg2loglikelihoodTaper, + "method" = "L-BFGS-B", + "lower" = coco_pen@info$boundaries$theta_lower, + "par" = coco_pen@info$boundaries$theta_init, + "upper" = coco_pen@info$boundaries$theta_upper, + "par.pos" = designMatrix$par.pos, + "ref_taper" = ref_taper, + "locs" = coco_pen@locs, + "x_covariates" = mod_DM, + "smooth.limits" = coco_pen@info$smooth.limits, + "cholS" = spam::chol.spam(ref_taper), + "z" = coco_pen@z, 
+ "n" = dim(coco.object@z)[1], + "lambda" = c(0, 0, coco_pen@info$lambda.reg), + "safe" = safe + ) + + } + + output_taper_two <- do.call(what = optimParallel::optimParallel, args = c( + args_optim, + optim.control + )) + + coco_pen@output <- output_taper_two + + boundaries_two <- list(theta_init = args_optim$par, + theta_upper = args_optim$upper, + theta_lower = args_optim$lower) + + .cocons.check.convergence(output_taper_two, boundaries_two) + + coco_pen@info$optim.control <- optim.control + coco_pen@info$boundaries <- boundaries_two + coco_pen@info$mean.vector <- tmp_values$mean.vector + coco_pen@info$sd.vector <- tmp_values$sd.vector + coco_pen@info$optim.type <- "ml" + coco_pen@info$safe <- safe + coco_pen@info$call <- match.call() + + return(coco_pen) + + } + if(optim.type == "ml"){ # taper @@ -290,7 +508,7 @@ cocoOptim <- function(coco.object, boundaries = list(), "cholS" = spam::chol.spam(ref_taper), "z" = coco.object@z, "n" = dim(coco.object@z)[1], - "lambda" = coco.object@info$lambda, + "lambda" = c(0, 0, coco.object@info$lambda.reg), "safe" = safe ) @@ -302,6 +520,7 @@ cocoOptim <- function(coco.object, boundaries = list(), .cocons.check.convergence(output_taper, boundaries) + coco_pen@info$optim.control <- optim.control coco.object@output <- output_taper coco.object@info$boundaries <- boundaries coco.object@info$mean.vector <- tmp_values$mean.vector @@ -360,7 +579,7 @@ cocoOptim <- function(coco.object, boundaries = list(), "cholS" = spam::chol.spam(ref_taper), "z" = coco.object@z, "n" = dim(coco.object@z)[1], - "lambda" = coco.object@info$lambda, + "lambda" = c(0, 0, coco.object@info$lambda.reg), "safe" = safe ) @@ -449,6 +668,7 @@ cocoOptim <- function(coco.object, boundaries = list(), .cocons.check.convergence(output_taper, boundaries_temp) + coco_pen@info$optim.control <- optim.control coco.object@output <- output_taper coco.object@info$boundaries <- boundaries_temp coco.object@info$mean.vector <- tmp_values$mean.vector @@ -463,3 +683,4 @@ cocoOptim 
<- function(coco.object, boundaries = list(), } } + \ No newline at end of file diff --git a/R/profile.R b/R/profile.R index a352df2..431297d 100644 --- a/R/profile.R +++ b/R/profile.R @@ -6,11 +6,14 @@ "aniso", "tilt", "smooth", "nugget"), + cocons.sparse.point = 1e-4 , + cocons.Optim.Control = list("parallel" = list(forward = FALSE, loginfo = TRUE), "control" = list(factr = 1e-8/.Machine$double.eps, # tolerance of 1e-8 trace = 1, - maxit = 200, + ndeps = .Machine$double.eps^(1/4), + maxit = 500, lmm = 100), # we save more information than the standard 5 for lmm "hessian" = F) diff --git a/README.md b/README.md index 9b8c6e3..7bd16ce 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,9 @@ R package for nonstationary spatial modeling, simulation, and prediction of Gaussian processes with modular covariate-based covariance functions. + +True Process Nonstationary Model Stationary Model + For a brief introduction: ``` R install.packages("cocons") diff --git a/gifs/classic_model.gif b/gifs/classic_model.gif new file mode 100644 index 0000000..af8bd83 Binary files /dev/null and b/gifs/classic_model.gif differ diff --git a/gifs/ns_model.gif b/gifs/ns_model.gif new file mode 100644 index 0000000..733acb0 Binary files /dev/null and b/gifs/ns_model.gif differ diff --git a/gifs/true_model.gif b/gifs/true_model.gif new file mode 100644 index 0000000..e4c0190 Binary files /dev/null and b/gifs/true_model.gif differ diff --git a/inst/doc/cocons.pdf b/inst/doc/cocons.pdf index 0a145a4..e8a2631 100644 Binary files a/inst/doc/cocons.pdf and b/inst/doc/cocons.pdf differ diff --git a/man/GetNeg2loglikelihood.Rd b/man/GetNeg2loglikelihood.Rd index 9f3a783..11fdfb9 100644 --- a/man/GetNeg2loglikelihood.Rd +++ b/man/GetNeg2loglikelihood.Rd @@ -22,7 +22,7 @@ smooth.limits, z, n, lambda, safe = TRUE) \item{n}{\code{(integer)} dim(z)[1].} -\item{lambda}{\code{(numeric)} regularization parameter.} +\item{lambda}{\code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, 
lambda.reg (in this order)} \item{safe}{\code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}.} } diff --git a/man/GetNeg2loglikelihoodProfile.Rd b/man/GetNeg2loglikelihoodProfile.Rd index ae64673..d34ece2 100644 --- a/man/GetNeg2loglikelihoodProfile.Rd +++ b/man/GetNeg2loglikelihoodProfile.Rd @@ -24,7 +24,7 @@ smooth.limits, z, n, x_betas,lambda, safe = TRUE) \item{x_betas}{\code{(matrix) or (data.frame)} design matrix for the spatial mean.} -\item{lambda}{\code{(numeric)} regularization parameter.} +\item{lambda}{\code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order)} \item{safe}{\code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}.} } diff --git a/man/GetNeg2loglikelihoodREML.Rd b/man/GetNeg2loglikelihoodREML.Rd index 3d4bd03..2910125 100644 --- a/man/GetNeg2loglikelihoodREML.Rd +++ b/man/GetNeg2loglikelihoodREML.Rd @@ -24,7 +24,7 @@ smooth.limits, z, n, lambda, safe = TRUE) \item{n}{\code{(integer)} dim(z)[1].} -\item{lambda}{\code{(numeric)} regularization parameter.} +\item{lambda}{\code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order)} \item{safe}{\code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}.} } diff --git a/man/GetNeg2loglikelihoodTaper.Rd b/man/GetNeg2loglikelihoodTaper.Rd index be03a7b..b679549 100644 --- a/man/GetNeg2loglikelihoodTaper.Rd +++ b/man/GetNeg2loglikelihoodTaper.Rd @@ -26,7 +26,7 @@ x_covariates, smooth.limits, cholS, z, n, lambda, safe = TRUE) \item{n}{\code{(numeric)} dim(z)[1].} -\item{lambda}{\code{(numeric)} regularization parameter.} +\item{lambda}{\code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order)} \item{safe}{\code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. 
Default \code{TRUE}.} } diff --git a/man/GetNeg2loglikelihoodTaperProfile.Rd b/man/GetNeg2loglikelihoodTaperProfile.Rd index f21f227..0ccbded 100644 --- a/man/GetNeg2loglikelihoodTaperProfile.Rd +++ b/man/GetNeg2loglikelihoodTaperProfile.Rd @@ -26,7 +26,7 @@ locs, x_covariates, smooth.limits, cholS, z, n, lambda, safe = TRUE) \item{n}{\code{(integer)} dim(z)[1].} -\item{lambda}{\code{(numeric)} regularization parameter.} +\item{lambda}{\code{(numeric vector)} a vector including lambda.Sigma, lambda.betas, lambda.reg (in this order)} \item{safe}{\code{(TRUE/FALSE)} if \code{TRUE} returns a large pre-defined value under Cholesky errors. Default \code{TRUE}.} } diff --git a/man/coco.Rd b/man/coco.Rd index 106a45d..54c4278 100644 --- a/man/coco.Rd +++ b/man/coco.Rd @@ -52,7 +52,10 @@ where \eqn{\boldsymbol{\beta}}, \eqn{\boldsymbol{\alpha}}, \eqn{\boldsymbol{\the \eqn{\nu_{l}}, and \eqn{\nu_{u}} are the lower and upper bounds limiting the range of variation of the spatially-varying smoothness, and where \eqn{\boldsymbol{X}_{\ell}} relates to the design matrix defined by the specific models for each of the source of nonstationarity. Lastly, arguments for the \code{"info"} list argument involve: \itemize{ -\item \code{"lambda"}: (\code{numeric}) a positive scalar specifying the regularization parameter. Larger values penalizes highly-smoothed long-tailed covariance functions. +\item \code{"lambda.reg"}: (\code{numeric}) a positive scalar specifying the regularization parameter. Larger values regularize highly-smoothed long-tailed covariance functions. +\item \code{"lambda.Sigma"}: (\code{numeric}) a positive scalar specifying the penalization parameter for the covariate-driven covariance parameters. +\item \code{"lambda.betas"}: (\code{numeric}) a positive scalar specifying the penalization parameter for the covariate-driven spatial mean parameters. 
+\item \code{"sparse.point"}: (\code{numeric}) a cutting point for which smaller coefficients in absolute value will be set to zero after the smoothed L1 penalization optimization. Used in combination with \code{lambda.Sigma} and \code{lambda.betas}. By default, it is set to \code{1e-4}. \item \code{"smooth.limits"}: (\code{numeric vector}) specifying the range of variation for the spatially varying smoothness (e.g. c(0.5, 2.5)). \item \code{"taper"}: (\code{numeric}) specifying the desired taper function from the spam package (only for "sparse" coco objects). \item \code{"delta"}: (\code{numeric}) specifying the taper range/scale (only for "sparse" coco objects). diff --git a/man/getBoundariesV4.Rd b/man/getBoundariesV4.Rd new file mode 100644 index 0000000..3c99169 --- /dev/null +++ b/man/getBoundariesV4.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/getFunctions.R +\name{getBoundariesV4} +\alias{getBoundariesV4} +\title{Simple build of boundaries} +\usage{ +getBoundariesV4(coco.object, lower.bound, upper.bound) +} +\arguments{ +\item{coco.object}{\code{(S4)} a coco.object.} + +\item{lower.bound}{\code{(numeric vector)} lower bound for covariate-driven parameters of the nonstationary covariance function.} + +\item{upper.bound}{\code{(numeric vector)} upper bound for covariate-driven parameters of the nonstationary covariance function.} +} +\value{ +(\code{list}) a list with boundaries and simple init values for the optim L-BFGS-B routine +} +\description{ +provides a set of upper and lower bounds for the L-BFGS-B routine +} +\author{ +Federico Blasi +} diff --git a/man/show-methods.Rd b/man/show-methods.Rd new file mode 100644 index 0000000..bad6a6f --- /dev/null +++ b/man/show-methods.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/methods.R +\docType{methods} +\name{show} +\alias{show} +\alias{show,coco-method} +\title{Show Method for Coco Class} +\usage{ 
+\S4method{show}{coco}(object) +} +\arguments{ +\item{object}{(\code{S4}) An object of class 'coco'.} +} +\value{ +A plot is created. +} +\description{ +This method show objects of class 'coco'. +} +\author{ +Federico Blasi +} diff --git a/man/sumsmoothlone.Rd b/man/sumsmoothlone.Rd new file mode 100644 index 0000000..dc68bba --- /dev/null +++ b/man/sumsmoothlone.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/RcppExports.R +\name{sumsmoothlone} +\alias{sumsmoothlone} +\title{smoothed-L1 penalization over the covariate-driven parameters} +\usage{ +sumsmoothlone(x, lambda, alpha = 1e+06) +} +\arguments{ +\item{x}{a vector of parameters to penalize} + +\item{lambda}{penalization hyperparameter} + +\item{alpha}{hyperparameter to approximate L1 penalization. By default 1e6 provides a decent approximation to L1, while also being a differentiable function} +} +\value{ +penalization +} +\description{ +smoothed-L1 penalization over the covariate-driven parameters +} diff --git a/src/RcppExports.cpp b/src/RcppExports.cpp index a51731c..593b2b0 100644 --- a/src/RcppExports.cpp +++ b/src/RcppExports.cpp @@ -11,6 +11,19 @@ Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif +// sumsmoothlone +double sumsmoothlone(NumericVector& x, double lambda, double alpha); +RcppExport SEXP _cocons_sumsmoothlone(SEXP xSEXP, SEXP lambdaSEXP, SEXP alphaSEXP) { +BEGIN_RCPP + Rcpp::RObject rcpp_result_gen; + Rcpp::RNGScope rcpp_rngScope_gen; + Rcpp::traits::input_parameter< NumericVector& >::type x(xSEXP); + Rcpp::traits::input_parameter< double >::type lambda(lambdaSEXP); + Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); + rcpp_result_gen = Rcpp::wrap(sumsmoothlone(x, lambda, alpha)); + return rcpp_result_gen; +END_RCPP +} // cov_rns NumericMatrix cov_rns(List& theta, NumericMatrix& locs, NumericMatrix& x_covariates, NumericVector& smooth_limits); RcppExport SEXP 
_cocons_cov_rns(SEXP thetaSEXP, SEXP locsSEXP, SEXP x_covariatesSEXP, SEXP smooth_limitsSEXP) { @@ -90,6 +103,7 @@ END_RCPP } static const R_CallMethodDef CallEntries[] = { + {"_cocons_sumsmoothlone", (DL_FUNC) &_cocons_sumsmoothlone, 3}, {"_cocons_cov_rns", (DL_FUNC) &_cocons_cov_rns, 4}, {"_cocons_cov_rns_pred", (DL_FUNC) &_cocons_cov_rns_pred, 6}, {"_cocons_cov_rns_classic", (DL_FUNC) &_cocons_cov_rns_classic, 3}, diff --git a/src/RcppExports.o b/src/RcppExports.o index 6c9d50b..b9e67b4 100644 Binary files a/src/RcppExports.o and b/src/RcppExports.o differ diff --git a/src/cocons.so b/src/cocons.so index 09032c3..c18c51a 100644 Binary files a/src/cocons.so and b/src/cocons.so differ diff --git a/src/cocons_full.cpp b/src/cocons_full.cpp index ee6ec42..e2d9987 100644 --- a/src/cocons_full.cpp +++ b/src/cocons_full.cpp @@ -2,6 +2,33 @@ using namespace Rcpp; +//' smoothed-L1 penalization over the covariate-driven parameters +//' +//' @param x a vector of parameters to penalize +//' @param lambda penalization hyperparameter +//' @param alpha hyperparameter to approximate L1 penalization. 
By default 1e6 provides a decent approximation to L1, while also being a differentiable function +//' @return penalization +// [[Rcpp::export]] +double sumsmoothlone(NumericVector& x, + double lambda, + double alpha = 1e6){ + + double sum = 0; + + for(int ww = 0; ww < x.length(); ww++){ + + if(std::abs(x[ww]) > 1e-4){ + sum = sum + std::abs(x[ww]); + } else{ + sum = sum + std::pow(alpha,-1) * (std::log(1 + std::exp(-alpha * x[ww])) + std::log(1 + exp(alpha * x[ww]))); + } + + } + + return lambda * sum; + +} + //' Dense covariance function (difference parameterization) //' //' @param theta vector of parameters @@ -260,13 +287,27 @@ NumericMatrix cov_rns(List& theta, } else{ - dist_matrix(ii,jj) = dist_matrix(jj,ii) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * - std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * - sigma_vector[ii] * sigma_vector[jj] * - std::sqrt(dets_vector[ii] * std::sin(tilt_vector[ii]) * - dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + if(smooth_s_Q_ij < 706.0){ + + dist_matrix(ii,jj) = dist_matrix(jj,ii) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * + sigma_vector[ii] * sigma_vector[jj] * + std::sqrt(dets_vector[ii] * std::sin(tilt_vector[ii]) * + dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + } else { + + dist_matrix(ii,jj) = dist_matrix(jj,ii) = dist_matrix(jj,ii) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * std::sqrt(M_PI / (2.0 * smooth_s_Q_ij)) * std::exp(-smooth_s_Q_ij) * + sigma_vector[ii] * sigma_vector[jj] * + std::sqrt(dets_vector[ii] * std::sin(tilt_vector[ii]) * + dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + } continue; + } } } @@ -402,11 +443,24 @@ NumericMatrix cov_rns_pred(List& theta, NumericMatrix& locs, } else{ - dist_matrix(ii,jj) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) 
* - std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * - p_sigma_vector[ii] * sigma_vector[jj] * - std::sqrt(p_dets_vector[ii] * std::sin(p_tilt_vector[ii]) * - dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + if(smooth_s_Q_ij < 706.0){ + + dist_matrix(ii,jj) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * + p_sigma_vector[ii] * sigma_vector[jj] * + std::sqrt(p_dets_vector[ii] * std::sin(p_tilt_vector[ii]) * + dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + } else { + + dist_matrix(ii,jj) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * std::sqrt(M_PI / (2.0 * smooth_s_Q_ij)) * std::exp(-smooth_s_Q_ij) * + p_sigma_vector[ii] * sigma_vector[jj] * + std::sqrt(p_dets_vector[ii] * std::sin(p_tilt_vector[ii]) * + dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + } } } @@ -513,11 +567,23 @@ NumericMatrix cov_rns_classic(List& theta, NumericMatrix& locs, NumericMatrix& x } else{ - dist_matrix(ii,jj) = dist_matrix(jj,ii) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * - std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * - sigma_vector[ii] * sigma_vector[jj] * - std::sqrt(dets_vector[ii] * std::sin(tilt_vector[ii]) * - dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + if(smooth_s_Q_ij < 706.0){ + + dist_matrix(ii,jj) = dist_matrix(jj,ii) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * + sigma_vector[ii] * sigma_vector[jj] * + std::sqrt(dets_vector[ii] * std::sin(tilt_vector[ii]) * + dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + } else { + + dist_matrix(ii,jj) = dist_matrix(jj,ii) = dist_matrix(jj,ii) = std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, 
smtns) * std::sqrt(M_PI / (2.0 * smooth_s_Q_ij)) * std::exp(-smooth_s_Q_ij) * + sigma_vector[ii] * sigma_vector[jj] * + std::sqrt(dets_vector[ii] * std::sin(tilt_vector[ii]) * + dets_vector[jj] * std::sin(tilt_vector[jj])) / std::sqrt(det_ij); + + } } } diff --git a/src/cocons_full.o b/src/cocons_full.o index 4234ca5..bae6cb6 100644 Binary files a/src/cocons_full.o and b/src/cocons_full.o differ diff --git a/src/cocons_taper.cpp b/src/cocons_taper.cpp index b287210..2362f22 100644 --- a/src/cocons_taper.cpp +++ b/src/cocons_taper.cpp @@ -112,9 +112,19 @@ NumericVector cov_rns_taper_pred(List& theta, } else{ - dist_vector(acumulating) = prefactor * std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * - std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * - sigma_vector_pred[ii] * sigma_vector[jj]; + if(smooth_s_Q_ij < 706.0){ + + dist_vector(acumulating) = prefactor * std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * + sigma_vector_pred[ii] * sigma_vector[jj]; + + } else { + + dist_vector(acumulating) = prefactor * std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * std::sqrt(M_PI / (2.0 * smooth_s_Q_ij)) * std::exp(-smooth_s_Q_ij) * + sigma_vector_pred[ii] * sigma_vector[jj]; + + } } @@ -389,9 +399,21 @@ NumericVector cov_rns_taper(List& theta, } else{ - dist_vector(acumulating) = prefactor * std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * - std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * - sigma_vector[ii] * sigma_vector[jj]; + if(smooth_s_Q_ij < 706.0){ + + dist_vector(acumulating) = prefactor * std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + std::pow(smooth_s_Q_ij, smtns) * boost::math::cyl_bessel_k(smtns, smooth_s_Q_ij) * + sigma_vector[ii] * sigma_vector[jj]; + + } else { + + dist_vector(acumulating) = prefactor * std::pow(2.0 , -(smtns - 1)) / std::tgamma(smtns) * + 
std::pow(smooth_s_Q_ij, smtns) * std::sqrt(M_PI / (2.0 * smooth_s_Q_ij)) * std::exp(-smooth_s_Q_ij) * + sigma_vector[ii] * sigma_vector[jj]; + + } + + } diff --git a/src/cocons_taper.o b/src/cocons_taper.o index 9185a03..af06c0c 100644 Binary files a/src/cocons_taper.o and b/src/cocons_taper.o differ